4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
69 #include <rte_interrupts.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
78 #ifdef RTE_LIBRTE_PDUMP
79 #include <rte_pdump.h>
82 #include <rte_metrics.h>
83 #ifdef RTE_LIBRTE_BITRATE
84 #include <rte_bitrate.h>
86 #include <rte_metrics.h>
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
/*
 * NOTE(review): this file is a lossy extract of DPDK testpmd.c; the
 * leading number on each line is the original line number and interior
 * lines (comment delimiters, array terminators, #endif) are missing.
 *
 * Run-time configuration globals: verbosity, interactivity, NUMA
 * placement, peer MAC table, probed port/lcore inventory, forwarding
 * topology, mbuf sizing and per-burst parameters.
 */
93 uint16_t verbose_level = 0; /**< Silent by default. */
95 /* use master core for command line ? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
100 * NUMA support configuration.
101 * When set, the NUMA support attempts to dispatch the allocation of the
102 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
103 * probed ports among the CPU sockets 0 and 1.
104 * Otherwise, all memory is allocated from CPU socket 0.
106 uint8_t numa_support = 0; /**< No numa support by default */
109 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
112 uint8_t socket_num = UMA_NO_CONFIG;
115 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
120 * Record the Ethernet address of peer target ports to which packets are
122 * Must be instantiated with the ethernet addresses of peer traffic generator
125 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
126 portid_t nb_peer_eth_addrs = 0;
129 * Probed Target Environment.
131 struct rte_port *ports; /**< For all probed ethernet ports. */
132 portid_t nb_ports; /**< Number of probed ethernet ports. */
133 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
134 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
137 * Test Forwarding Configuration.
138 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
139 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
141 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
142 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
143 portid_t nb_cfg_ports; /**< Number of configured ports. */
144 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
146 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
147 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
149 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
150 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
153 * Forwarding engines.
155 struct fwd_engine * fwd_engines[] = {
164 #ifdef RTE_LIBRTE_IEEE1588
165 &ieee1588_fwd_engine,
170 struct fwd_config cur_fwd_config;
171 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
172 uint32_t retry_enabled;
173 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
174 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
176 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
177 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
178 * specified on command-line. */
181 * Configuration of packet segments used by the "txonly" processing engine.
183 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
184 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
185 TXONLY_DEF_PACKET_LEN,
187 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
189 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
190 /**< Split policy for packets to TX. */
192 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
193 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
195 /* current configuration is in DCB or not, 0 means it is not in DCB mode */
196 uint8_t dcb_config = 0;
198 /* Whether the dcb is in testing status */
199 uint8_t dcb_test = 0;
202 * Configurable number of RX/TX queues.
204 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
205 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
208 * Configurable number of RX/TX ring descriptors.
210 #define RTE_TEST_RX_DESC_DEFAULT 128
211 #define RTE_TEST_TX_DESC_DEFAULT 512
212 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
213 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
215 #define RTE_PMD_PARAM_UNSET -1
217 * Configurable values of RX and TX ring threshold registers.
220 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
221 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
224 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
/*
 * NOTE(review): lossy extract -- structure initializers below are
 * missing their closing braces and some members in this fragment.
 *
 * Per-queue threshold knobs (RTE_PMD_PARAM_UNSET means "use the PMD
 * default"), RSS/topology/link-check switches, default rte_eth_rxmode
 * and flow-director configuration, and queue-stats mapping tables,
 * followed by forward declarations of local helpers.
 */
229 * Configurable value of RX free threshold.
231 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
234 * Configurable value of RX drop enable.
236 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
239 * Configurable value of TX free threshold.
241 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
244 * Configurable value of TX RS bit threshold.
246 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
249 * Configurable value of TX queue flags.
251 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
254 * Receive Side Scaling (RSS) configuration.
256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
259 * Port topology configuration
261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
264 * Avoids to flush all the RX streams before starts forwarding.
266 uint8_t no_flush_rx = 0; /* flush by default */
269 * Avoids to check link status when starting/stopping a port.
271 uint8_t no_link_check = 0; /* check by default */
274 * Enable link status change notification
276 uint8_t lsc_interrupt = 1; /* enabled by default */
279 * NIC bypass mode configuration options.
281 #ifdef RTE_NIC_BYPASS
283 /* The NIC bypass watchdog timeout. */
284 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
288 #ifdef RTE_LIBRTE_LATENCY_STATS
291 * Set when latency stats is enabled in the commandline
293 uint8_t latencystats_enabled;
296 * Lcore ID to service latency statistics.
298 lcoreid_t latencystats_lcore_id = -1;
303 * Ethernet device configuration.
305 struct rte_eth_rxmode rx_mode = {
306 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
308 .header_split = 0, /**< Header Split disabled. */
309 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
310 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
311 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
312 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
313 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
314 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
317 struct rte_fdir_conf fdir_conf = {
318 .mode = RTE_FDIR_MODE_NONE,
319 .pballoc = RTE_FDIR_PBALLOC_64K,
320 .status = RTE_FDIR_REPORT_STATUS,
322 .vlan_tci_mask = 0x0,
324 .src_ip = 0xFFFFFFFF,
325 .dst_ip = 0xFFFFFFFF,
328 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
329 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
331 .src_port_mask = 0xFFFF,
332 .dst_port_mask = 0xFFFF,
333 .mac_addr_byte_mask = 0xFF,
334 .tunnel_type_mask = 1,
335 .tunnel_id_mask = 0xFFFFFFFF,
340 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
342 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
343 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
345 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
346 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
348 uint16_t nb_tx_queue_stats_mappings = 0;
349 uint16_t nb_rx_queue_stats_mappings = 0;
351 unsigned max_socket = 0;
353 /* Bitrate statistics */
354 struct rte_stats_bitrates *bitrate_data;
356 /* Forward function declarations */
357 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
358 static void check_all_ports_link_status(uint32_t port_mask);
359 static void eth_event_callback(uint8_t port_id,
360 enum rte_eth_event_type type,
364 * Check if all the ports are started.
365 * If yes, return positive value. If not, return zero.
367 static int all_ports_started(void);
/*
 * NOTE(review): fragment -- return type, braces and the 'continue'
 * statements after the two filter conditions are missing from this
 * extract.
 *
 * Build the default forwarding-lcore list: every enabled lcore except
 * the master goes into fwd_lcores_cpuids[], while max_socket tracks
 * the highest socket id seen (aborting if above RTE_MAX_NUMA_NODES).
 */
370 * Setup default configuration.
373 set_default_fwd_lcores_config(void)
377 unsigned int sock_num;
380 for (i = 0; i < RTE_MAX_LCORE; i++) {
381 sock_num = rte_lcore_to_socket_id(i) + 1;
382 if (sock_num > max_socket) {
383 if (sock_num > RTE_MAX_NUMA_NODES)
384 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
385 max_socket = sock_num;
387 if (!rte_lcore_is_enabled(i))
389 if (i == rte_get_master_lcore())
391 fwd_lcores_cpuids[nb_lc++] = i;
393 nb_lcores = (lcoreid_t) nb_lc;
394 nb_cfg_lcores = nb_lcores;
/*
 * NOTE(review): fragment -- braces missing from this extract.
 *
 * Fill the peer MAC table with locally-administered addresses whose
 * last byte is the port index.
 */
399 set_def_peer_eth_addrs(void)
403 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
404 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
405 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * NOTE(review): fragment -- braces missing from this extract.
 *
 * Default port config: forward on every probed port, identity mapping
 * of port index to port id.
 */
410 set_default_fwd_ports_config(void)
414 for (pt_id = 0; pt_id < nb_ports; pt_id++)
415 fwd_ports_ids[pt_id] = pt_id;
417 nb_cfg_ports = nb_ports;
418 nb_fwd_ports = nb_ports;
/*
 * NOTE(review): fragment -- braces missing from this extract.
 *
 * Apply all three default-configuration steps: lcores, peer MACs,
 * ports.
 */
422 set_def_fwd_config(void)
424 set_default_fwd_lcores_config();
425 set_def_peer_eth_addrs();
426 set_default_fwd_ports_config();
/*
 * NOTE(review): fragment -- return type, braces, the #else/#endif of
 * the XENVIRT section and several statements are missing from this
 * extract.
 *
 * Create an mbuf pool named for the given socket. Tries (in order,
 * depending on build options): Xen grant-alloc pool, anonymous-mapped
 * pool (create empty + populate_anon + pktmbuf init), then the plain
 * rte_pktmbuf_pool_create() wrapper. Exits the process on failure;
 * dumps the pool when verbose.
 */
430 * Configuration initialisation done once at init time.
433 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
434 unsigned int socket_id)
436 char pool_name[RTE_MEMPOOL_NAMESIZE];
437 struct rte_mempool *rte_mp = NULL;
440 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
441 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
444 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
445 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
447 #ifdef RTE_LIBRTE_PMD_XENVIRT
448 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
449 (unsigned) mb_mempool_cache,
450 sizeof(struct rte_pktmbuf_pool_private),
451 rte_pktmbuf_pool_init, NULL,
452 rte_pktmbuf_init, NULL,
456 /* if the former XEN allocation failed fall back to normal allocation */
457 if (rte_mp == NULL) {
459 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
460 mb_size, (unsigned) mb_mempool_cache,
461 sizeof(struct rte_pktmbuf_pool_private),
466 if (rte_mempool_populate_anon(rte_mp) == 0) {
467 rte_mempool_free(rte_mp);
471 rte_pktmbuf_pool_init(rte_mp, NULL);
472 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
474 /* wrapper to rte_mempool_create() */
475 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
476 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
481 if (rte_mp == NULL) {
482 rte_exit(EXIT_FAILURE,
483 "Creation of mbuf pool for socket %u failed: %s\n",
484 socket_id, rte_strerror(rte_errno));
485 } else if (verbose_level > 0) {
486 rte_mempool_dump(stdout, rte_mp);
/*
 * NOTE(review): fragment -- the tail of the function (the warning
 * suffix, the return -1 path and the return 0 path) is missing from
 * this extract.
 *
 * Validate a socket id against the max_socket discovered at init;
 * warns once (only in NUMA mode) about manual NUMA configuration when
 * the id is out of range.
 */
491 * Check given socket id is valid or not with NUMA mode,
492 * if valid, return 0, else return -1
495 check_socket_id(const unsigned int socket_id)
497 static int warning_once = 0;
499 if (socket_id >= max_socket) {
500 if (!warning_once && numa_support)
501 printf("Warning: NUMA should be configured manually by"
502 " using --port-numa-config and"
503 " --ring-numa-config parameters along with"
/*
 * NOTE(review): the function header is missing from this extract; by
 * its locals and flow this looks like the body of testpmd's
 * init_config() -- confirm against the full source.
 *
 * Allocates the per-lcore fwd_lcore array, sizes and creates the mbuf
 * pool(s) (single pool in UMA mode, one per socket when NUMA support
 * is on), records per-port socket placement, flags every port for
 * reconfiguration, binds each forwarding lcore to the mbuf pool of
 * its socket (falling back to socket 0), and initializes the
 * forwarding streams.
 */
515 struct rte_port *port;
516 struct rte_mempool *mbp;
517 unsigned int nb_mbuf_per_pool;
519 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
521 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
522 /* Configuration of logical cores. */
523 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
524 sizeof(struct fwd_lcore *) * nb_lcores,
525 RTE_CACHE_LINE_SIZE);
526 if (fwd_lcores == NULL) {
527 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
528 "failed\n", nb_lcores);
530 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
531 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
532 sizeof(struct fwd_lcore),
533 RTE_CACHE_LINE_SIZE);
534 if (fwd_lcores[lc_id] == NULL) {
535 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
538 fwd_lcores[lc_id]->cpuid_idx = lc_id;
542 * Create pools of mbuf.
543 * If NUMA support is disabled, create a single pool of mbuf in
544 * socket 0 memory by default.
545 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
547 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
548 * nb_txd can be configured at run time.
550 if (param_total_num_mbufs)
551 nb_mbuf_per_pool = param_total_num_mbufs;
553 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
554 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
558 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
562 if (socket_num == UMA_NO_CONFIG)
563 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
565 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
569 RTE_ETH_FOREACH_DEV(pid) {
571 rte_eth_dev_info_get(pid, &port->dev_info);
574 if (port_numa[pid] != NUMA_NO_CONFIG)
575 port_per_socket[port_numa[pid]]++;
577 uint32_t socket_id = rte_eth_dev_socket_id(pid);
579 /* if socket_id is invalid, set to 0 */
580 if (check_socket_id(socket_id) < 0)
582 port_per_socket[socket_id]++;
586 /* set flag to initialize port/queue */
587 port->need_reconfig = 1;
588 port->need_reconfig_queues = 1;
593 unsigned int nb_mbuf;
595 if (param_total_num_mbufs)
596 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
598 for (i = 0; i < max_socket; i++) {
599 nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
601 mbuf_pool_create(mbuf_data_size,
608 * Records which Mbuf pool to use by each logical core, if needed.
610 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
611 mbp = mbuf_pool_find(
612 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
615 mbp = mbuf_pool_find(0);
616 fwd_lcores[lc_id]->mbp = mbp;
619 /* Configuration of packet forwarding streams. */
620 if (init_fwd_streams() < 0)
621 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * NOTE(review): fragment -- return type, braces and the trailing
 * init_port_config() call (per upstream testpmd) are missing from
 * this extract.
 *
 * Re-query device info for one port and flag it (and its queues) for
 * reconfiguration on the given socket.
 */
628 reconfig(portid_t new_port_id, unsigned socket_id)
630 struct rte_port *port;
632 /* Reconfiguration of Ethernet ports. */
633 port = &ports[new_port_id];
634 rte_eth_dev_info_get(new_port_id, &port->dev_info);
636 /* set flag to initialize port/queue */
637 port->need_reconfig = 1;
638 port->need_reconfig_queues = 1;
639 port->socket_id = socket_id;
/*
 * NOTE(review): fragment -- braces, 'return -1' statements after the
 * queue-count checks, and the final return are missing from this
 * extract.
 *
 * (Re)build the fwd_streams array: validates nb_rxq/nb_txq against
 * each port's limits, assigns each port a socket id (NUMA-aware or
 * from --socket-num), then frees any old stream array and allocates
 * nb_ports * max(nb_rxq, nb_txq) zeroed streams.
 */
646 init_fwd_streams(void)
649 struct rte_port *port;
650 streamid_t sm_id, nb_fwd_streams_new;
653 /* set socket id according to numa or not */
654 RTE_ETH_FOREACH_DEV(pid) {
656 if (nb_rxq > port->dev_info.max_rx_queues) {
657 printf("Fail: nb_rxq(%d) is greater than "
658 "max_rx_queues(%d)\n", nb_rxq,
659 port->dev_info.max_rx_queues);
662 if (nb_txq > port->dev_info.max_tx_queues) {
663 printf("Fail: nb_txq(%d) is greater than "
664 "max_tx_queues(%d)\n", nb_txq,
665 port->dev_info.max_tx_queues);
669 if (port_numa[pid] != NUMA_NO_CONFIG)
670 port->socket_id = port_numa[pid];
672 port->socket_id = rte_eth_dev_socket_id(pid);
674 /* if socket_id is invalid, set to 0 */
675 if (check_socket_id(port->socket_id) < 0)
680 if (socket_num == UMA_NO_CONFIG)
683 port->socket_id = socket_num;
687 q = RTE_MAX(nb_rxq, nb_txq);
689 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
692 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
693 if (nb_fwd_streams_new == nb_fwd_streams)
696 if (fwd_streams != NULL) {
697 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
698 if (fwd_streams[sm_id] == NULL)
700 rte_free(fwd_streams[sm_id]);
701 fwd_streams[sm_id] = NULL;
703 rte_free(fwd_streams);
708 nb_fwd_streams = nb_fwd_streams_new;
709 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
710 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
711 if (fwd_streams == NULL)
712 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
713 "failed\n", nb_fwd_streams);
715 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
716 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
717 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
718 if (fwd_streams[sm_id] == NULL)
719 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
726 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * NOTE(review): fragment -- total_burst initialization, the
 * second-highest update branch, braces and several returns are
 * missing from this extract.
 *
 * Print a one-line histogram summary of burst sizes: total bursts and
 * the share of the two most frequent burst lengths (plus "others").
 */
728 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
730 unsigned int total_burst;
731 unsigned int nb_burst;
732 unsigned int burst_stats[3];
733 uint16_t pktnb_stats[3];
735 int burst_percent[3];
738 * First compute the total number of packet bursts and the
739 * two highest numbers of bursts of the same number of packets.
742 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
743 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
744 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
745 nb_burst = pbs->pkt_burst_spread[nb_pkt];
748 total_burst += nb_burst;
749 if (nb_burst > burst_stats[0]) {
750 burst_stats[1] = burst_stats[0];
751 pktnb_stats[1] = pktnb_stats[0];
752 burst_stats[0] = nb_burst;
753 pktnb_stats[0] = nb_pkt;
756 if (total_burst == 0)
758 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
759 printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
760 burst_percent[0], (int) pktnb_stats[0]);
761 if (burst_stats[0] == total_burst) {
765 if (burst_stats[0] + burst_stats[1] == total_burst) {
766 printf(" + %d%% of %d pkts]\n",
767 100 - burst_percent[0], pktnb_stats[1]);
770 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
771 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
772 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
773 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
776 printf(" + %d%% of %d pkts + %d%% of others]\n",
777 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
779 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * NOTE(review): fragment -- braces and some format-string
 * continuation lines are missing from this extract.
 *
 * Print forwarding statistics for one port: RX/TX packet and drop
 * counters (two layouts depending on whether queue-stats mapping is
 * enabled), csum-engine error counters, optional per-burst stats, and
 * per-stats-register queue counters.
 */
782 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
784 struct rte_port *port;
787 static const char *fwd_stats_border = "----------------------";
789 port = &ports[port_id];
790 printf("\n  %s Forward statistics for port %-2d %s\n",
791 fwd_stats_border, port_id, fwd_stats_border);
793 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
794 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
796 stats->ipackets, stats->imissed,
797 (uint64_t) (stats->ipackets + stats->imissed));
799 if (cur_fwd_eng == &csum_fwd_engine)
800 printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
801 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
802 if ((stats->ierrors + stats->rx_nombuf) > 0) {
803 printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
804 printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
807 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
809 stats->opackets, port->tx_dropped,
810 (uint64_t) (stats->opackets + port->tx_dropped));
813 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
815 stats->ipackets, stats->imissed,
816 (uint64_t) (stats->ipackets + stats->imissed));
818 if (cur_fwd_eng == &csum_fwd_engine)
819 printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
820 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
821 if ((stats->ierrors + stats->rx_nombuf) > 0) {
822 printf("  RX-error:%"PRIu64"\n", stats->ierrors);
823 printf("  RX-nombufs:             %14"PRIu64"\n",
827 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
829 stats->opackets, port->tx_dropped,
830 (uint64_t) (stats->opackets + port->tx_dropped));
833 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
835 pkt_burst_stats_display("RX",
836 &port->rx_stream->rx_burst_stats);
838 pkt_burst_stats_display("TX",
839 &port->tx_stream->tx_burst_stats);
842 if (port->rx_queue_stats_mapping_enabled) {
844 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
845 printf("  Stats reg %2d RX-packets:%14"PRIu64
846 "     RX-errors:%14"PRIu64
847 "    RX-bytes:%14"PRIu64"\n",
848 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
852 if (port->tx_queue_stats_mapping_enabled) {
853 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
854 printf("  Stats reg %2d TX-packets:%14"PRIu64
855 "                                 TX-bytes:%14"PRIu64"\n",
856 i, stats->q_opackets[i], stats->q_obytes[i]);
860 printf("  %s--------------------------------%s\n",
861 fwd_stats_border, fwd_stats_border);
/*
 * NOTE(review): fragment -- braces and an early 'return' after the
 * all-zero check are missing from this extract.
 *
 * Print per-stream statistics (RX/TX packet counts, drops, and for
 * the csum engine the bad-checksum counters), skipping streams with
 * no activity.
 */
865 fwd_stream_stats_display(streamid_t stream_id)
867 struct fwd_stream *fs;
868 static const char *fwd_top_stats_border = "-------";
870 fs = fwd_streams[stream_id];
871 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
872 (fs->fwd_dropped == 0))
874 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
875 "TX Port=%2d/Queue=%2d %s\n",
876 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
877 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
878 printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
879 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
881 /* if checksum mode */
882 if (cur_fwd_eng == &csum_fwd_engine) {
883 printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
884 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
887 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
888 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
889 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * NOTE(review): fragment -- braces and the 'do {' opening of the
 * inner loop are missing from this extract.
 *
 * Drain all RX queues of all forwarding ports before a run starts:
 * two passes (with a 10 ms pause between them), each receiving and
 * freeing bursts until a queue is empty or a 1-second TSC timeout
 * expires (guards against a queue that never empties).
 */
894 flush_fwd_rx_queues(void)
896 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
903 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
904 uint64_t timer_period;
906 /* convert to number of cycles */
907 timer_period = rte_get_timer_hz(); /* 1 second timeout */
909 for (j = 0; j < 2; j++) {
910 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
911 for (rxq = 0; rxq < nb_rxq; rxq++) {
912 port_id = fwd_ports_ids[rxp];
914 * testpmd can stuck in the below do while loop
915 * if rte_eth_rx_burst() always returns nonzero
916 * packets. So timer is added to exit this loop
917 * after 1sec timer expiry.
919 prev_tsc = rte_rdtsc();
921 nb_rx = rte_eth_rx_burst(port_id, rxq,
922 pkts_burst, MAX_PKT_BURST);
923 for (i = 0; i < nb_rx; i++)
924 rte_pktmbuf_free(pkts_burst[i]);
926 cur_tsc = rte_rdtsc();
927 diff_tsc = cur_tsc - prev_tsc;
928 timer_tsc += diff_tsc;
929 } while ((nb_rx > 0) &&
930 (timer_tsc < timer_period));
934 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * NOTE(review): fragment -- braces, the 'do {' opening of the main
 * loop and some #endif lines are missing from this extract.
 *
 * Main per-lcore forwarding loop: repeatedly apply pkt_fwd to every
 * stream owned by this lcore until fc->stopped is set. Optionally
 * recomputes per-port bitrate stats once per second (BITRATE build)
 * and updates latency stats on the designated lcore (LATENCY_STATS
 * build).
 */
939 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
941 struct fwd_stream **fsm;
944 #ifdef RTE_LIBRTE_BITRATE
945 uint64_t tics_per_1sec;
947 uint64_t tics_current;
948 uint8_t idx_port, cnt_ports;
950 cnt_ports = rte_eth_dev_count();
951 tics_datum = rte_rdtsc();
952 tics_per_1sec = rte_get_timer_hz();
954 fsm = &fwd_streams[fc->stream_idx];
955 nb_fs = fc->stream_nb;
957 for (sm_id = 0; sm_id < nb_fs; sm_id++)
958 (*pkt_fwd)(fsm[sm_id]);
959 #ifdef RTE_LIBRTE_BITRATE
960 tics_current = rte_rdtsc();
961 if (tics_current - tics_datum >= tics_per_1sec) {
962 /* Periodic bitrate calculation */
963 for (idx_port = 0; idx_port < cnt_ports; idx_port++)
964 rte_stats_bitrate_calc(bitrate_data, idx_port);
965 tics_datum = tics_current;
968 #ifdef RTE_LIBRTE_LATENCY_STATS
969 if (latencystats_lcore_id == rte_lcore_id())
970 rte_latencystats_update();
973 } while (! fc->stopped);
/*
 * NOTE(review): fragment -- return type, braces and the 'return 0'
 * are missing from this extract.
 *
 * lcore entry point: run the currently configured forwarding engine
 * on this core's streams.
 */
977 start_pkt_forward_on_core(void *fwd_arg)
979 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
980 cur_fwd_config.fwd_eng->packet_fwd);
/*
 * NOTE(review): fragment -- braces, the tmp_lcore copy from *fwd_lc
 * and the return are missing from this extract.
 *
 * Run the txonly engine exactly once: a lcore copy with stopped=1
 * makes run_pkt_fwd_on_lcore's loop execute a single iteration.
 */
985 * Run the TXONLY packet forwarding engine to send a single burst of packets.
986 * Used to start communication flows in network loopback test configurations.
989 run_one_txonly_burst_on_core(void *fwd_arg)
991 struct fwd_lcore *fwd_lc;
992 struct fwd_lcore tmp_lcore;
994 fwd_lc = (struct fwd_lcore *) fwd_arg;
996 tmp_lcore.stopped = 1;
997 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
/*
 * NOTE(review): fragment -- braces and the diag!=0 check before the
 * failure printf are missing from this extract.
 *
 * Start forwarding: call the engine's per-port begin hook (if any),
 * then remote-launch the given lcore function on every forwarding
 * lcore (the current lcore is skipped in interactive mode).
 */
1002 * Launch packet forwarding:
1003 *    - Setup per-port forwarding context.
1004 *    - launch logical cores with their forwarding configuration.
1007 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1009 port_fwd_begin_t port_fwd_begin;
1014 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1015 if (port_fwd_begin != NULL) {
1016 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1017 (*port_fwd_begin)(fwd_ports_ids[i]);
1019 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1020 lc_id = fwd_lcores_cpuids[i];
1021 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1022 fwd_lcores[i]->stopped = 0;
1023 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1024 fwd_lcores[i], lc_id);
1026 printf("launch lcore %u failed - diag=%d\n",
/*
 * NOTE(review): fragment -- braces, early returns, the DCB-mode
 * condition guarding the port loop, the test_done=0 assignment and
 * several #endif lines are missing from this extract.
 *
 * Top-level "start" command: validates that the queue counts match
 * the selected engine (rxonly/txonly/other), that all ports are
 * started and forwarding is not already running, rebuilds streams,
 * enforces DCB constraints, flushes RX queues, snapshots port stats,
 * zeroes per-stream counters, optionally runs one txonly burst first,
 * then launches the forwarding lcores.
 */
1036 start_packet_forwarding(int with_tx_first)
1038 port_fwd_begin_t port_fwd_begin;
1039 port_fwd_end_t  port_fwd_end;
1040 struct rte_port *port;
1045 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1046 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1048 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1049 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1051 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1052 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1053 (!nb_rxq || !nb_txq))
1054 rte_exit(EXIT_FAILURE,
1055 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1056 cur_fwd_eng->fwd_mode_name);
1058 if (all_ports_started() == 0) {
1059 printf("Not all ports were started\n");
1062 if (test_done == 0) {
1063 printf("Packet forwarding already started\n");
1067 if (init_fwd_streams() < 0) {
1068 printf("Fail from init_fwd_streams()\n");
1073 for (i = 0; i < nb_fwd_ports; i++) {
1074 pt_id = fwd_ports_ids[i];
1075 port = &ports[pt_id];
1076 if (!port->dcb_flag) {
1077 printf("In DCB mode, all forwarding ports must "
1078 "be configured in this mode.\n");
1082 if (nb_fwd_lcores == 1) {
1083 printf("In DCB mode,the nb forwarding cores "
1084 "should be larger than 1.\n");
1091 flush_fwd_rx_queues();
1094 pkt_fwd_config_display(&cur_fwd_config);
1095 rxtx_config_display();
1097 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1098 pt_id = fwd_ports_ids[i];
1099 port = &ports[pt_id];
1100 rte_eth_stats_get(pt_id, &port->stats);
1101 port->tx_dropped = 0;
1103 map_port_queue_stats_mapping_registers(pt_id, port);
1105 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1106 fwd_streams[sm_id]->rx_packets = 0;
1107 fwd_streams[sm_id]->tx_packets = 0;
1108 fwd_streams[sm_id]->fwd_dropped = 0;
1109 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1110 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1112 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1113 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1114 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1115 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1116 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1118 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1119 fwd_streams[sm_id]->core_cycles = 0;
1122 if (with_tx_first) {
1123 port_fwd_begin = tx_only_engine.port_fwd_begin;
1124 if (port_fwd_begin != NULL) {
1125 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1126 (*port_fwd_begin)(fwd_ports_ids[i]);
1128 while (with_tx_first--) {
1129 launch_packet_forwarding(
1130 run_one_txonly_burst_on_core);
1131 rte_eal_mp_wait_lcore();
1133 port_fwd_end = tx_only_engine.port_fwd_end;
1134 if (port_fwd_end != NULL) {
1135 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1136 (*port_fwd_end)(fwd_ports_ids[i]);
1139 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * NOTE(review): fragment -- braces, the test_done guard, several
 * accumulator initializations (fwd_cycles, rx_bad_ip_csum,
 * total_recv, total_xmit) and the final test_done=1 are missing from
 * this extract.
 *
 * Top-level "stop" command: signal every forwarding lcore to stop,
 * wait for them, call the engine's per-port end hook, fold per-stream
 * counters (drops, bad csums, core cycles) back into the ports,
 * compute per-port deltas against the stats snapshot taken at start,
 * and print per-port plus accumulated totals.
 */
1143 stop_packet_forwarding(void)
1145 struct rte_eth_stats stats;
1146 struct rte_port *port;
1147 port_fwd_end_t  port_fwd_end;
1152 uint64_t total_recv;
1153 uint64_t total_xmit;
1154 uint64_t total_rx_dropped;
1155 uint64_t total_tx_dropped;
1156 uint64_t total_rx_nombuf;
1157 uint64_t tx_dropped;
1158 uint64_t rx_bad_ip_csum;
1159 uint64_t rx_bad_l4_csum;
1160 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1161 uint64_t fwd_cycles;
1163 static const char *acc_stats_border = "+++++++++++++++";
1166 printf("Packet forwarding not started\n");
1169 printf("Telling cores to stop...");
1170 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1171 fwd_lcores[lc_id]->stopped = 1;
1172 printf("\nWaiting for lcores to finish...\n");
1173 rte_eal_mp_wait_lcore();
1174 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1175 if (port_fwd_end != NULL) {
1176 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1177 pt_id = fwd_ports_ids[i];
1178 (*port_fwd_end)(pt_id);
1181 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1184 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1185 if (cur_fwd_config.nb_fwd_streams >
1186 cur_fwd_config.nb_fwd_ports) {
1187 fwd_stream_stats_display(sm_id);
1188 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1189 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1191 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1193 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1196 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1197 tx_dropped = (uint64_t) (tx_dropped +
1198 fwd_streams[sm_id]->fwd_dropped);
1199 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1202 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1203 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1204 fwd_streams[sm_id]->rx_bad_ip_csum);
1205 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1209 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1210 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1211 fwd_streams[sm_id]->rx_bad_l4_csum);
1212 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1215 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1216 fwd_cycles = (uint64_t) (fwd_cycles +
1217 fwd_streams[sm_id]->core_cycles);
1222 total_rx_dropped = 0;
1223 total_tx_dropped = 0;
1224 total_rx_nombuf  = 0;
1225 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1226 pt_id = fwd_ports_ids[i];
1228 port = &ports[pt_id];
1229 rte_eth_stats_get(pt_id, &stats);
1230 stats.ipackets -= port->stats.ipackets;
1231 port->stats.ipackets = 0;
1232 stats.opackets -= port->stats.opackets;
1233 port->stats.opackets = 0;
1234 stats.ibytes   -= port->stats.ibytes;
1235 port->stats.ibytes = 0;
1236 stats.obytes   -= port->stats.obytes;
1237 port->stats.obytes = 0;
1238 stats.imissed  -= port->stats.imissed;
1239 port->stats.imissed = 0;
1240 stats.oerrors  -= port->stats.oerrors;
1241 port->stats.oerrors = 0;
1242 stats.rx_nombuf -= port->stats.rx_nombuf;
1243 port->stats.rx_nombuf = 0;
1245 total_recv += stats.ipackets;
1246 total_xmit += stats.opackets;
1247 total_rx_dropped += stats.imissed;
1248 total_tx_dropped += port->tx_dropped;
1249 total_rx_nombuf  += stats.rx_nombuf;
1251 fwd_port_stats_display(pt_id, &stats);
1253 printf("\n  %s Accumulated forward statistics for all ports"
1255 acc_stats_border, acc_stats_border);
1256 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1258 "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1260 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1261 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1262 if (total_rx_nombuf > 0)
1263 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1264 printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1266 acc_stats_border, acc_stats_border);
1267 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1269 printf("\n  CPU cycles/packet=%u (total cycles="
1270 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1271 (unsigned int)(fwd_cycles / total_recv),
1272 fwd_cycles, total_recv);
1274 printf("\nDone.\n");
1279 dev_set_link_up(portid_t pid)
1281 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1282 printf("\nSet link up fail.\n");
1286 dev_set_link_down(portid_t pid)
1288 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1289 printf("\nSet link down fail.\n");
1293 all_ports_started(void)
1296 struct rte_port *port;
1298 RTE_ETH_FOREACH_DEV(pi) {
1300 /* Check if there is a port which is not started */
1301 if ((port->port_status != RTE_PORT_STARTED) &&
1302 (port->slave_flag == 0))
1306 /* No port is not started */
1311 all_ports_stopped(void)
1314 struct rte_port *port;
1316 RTE_ETH_FOREACH_DEV(pi) {
1318 if ((port->port_status != RTE_PORT_STOPPED) &&
1319 (port->slave_flag == 0))
1327 port_is_started(portid_t port_id)
1329 if (port_id_is_invalid(port_id, ENABLED_WARN))
1332 if (ports[port_id].port_status != RTE_PORT_STARTED)
1339 port_is_closed(portid_t port_id)
1341 if (port_id_is_invalid(port_id, ENABLED_WARN))
1344 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * start_port() - (re)configure and start port @pid, or every probed port
 * when pid == RTE_PORT_ALL.  Reconfigures the device and its RX/TX queues
 * when the need_reconfig/need_reconfig_queues flags are set, registers
 * event callbacks, starts the device and finally triggers a link-status
 * check.  Port state transitions are guarded with rte_atomic16_cmpset on
 * port->port_status (STOPPED -> HANDLING -> STARTED).
 * NOTE(review): this extract is missing interior lines (braces, returns,
 * some declarations); the code below is kept byte-identical to the extract.
 */
1351 start_port(portid_t pid)
1353 int diag, need_check_link_status = -1;
1356 struct rte_port *port;
1357 struct ether_addr mac_addr;
1358 enum rte_eth_event_type event_type;
1360 if (port_id_is_invalid(pid, ENABLED_WARN))
1365 RTE_ETH_FOREACH_DEV(pi) {
1366 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1369 need_check_link_status = 0;
/* Claim the port for handling; refuse if it is not currently stopped. */
1371 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1372 RTE_PORT_HANDLING) == 0) {
1373 printf("Port %d is now not stopped\n", pi);
1377 if (port->need_reconfig > 0) {
1378 port->need_reconfig = 0;
1380 printf("Configuring Port %d (socket %u)\n", pi,
1382 /* configure port */
1383 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* On configure failure: roll the status back to STOPPED and retry later. */
1386 if (rte_atomic16_cmpset(&(port->port_status),
1387 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1388 printf("Port %d can not be set back "
1389 "to stopped\n", pi);
1390 printf("Fail to configure port %d\n", pi);
1391 /* try to reconfigure port next time */
1392 port->need_reconfig = 1;
1396 if (port->need_reconfig_queues > 0) {
1397 port->need_reconfig_queues = 0;
1398 /* setup tx queues */
1399 for (qi = 0; qi < nb_txq; qi++) {
/* Prefer the user-selected TX ring NUMA socket when configured. */
1400 if ((numa_support) &&
1401 (txring_numa[pi] != NUMA_NO_CONFIG))
1402 diag = rte_eth_tx_queue_setup(pi, qi,
1403 nb_txd,txring_numa[pi],
1406 diag = rte_eth_tx_queue_setup(pi, qi,
1407 nb_txd,port->socket_id,
1413 /* Fail to setup tx queue, return */
1414 if (rte_atomic16_cmpset(&(port->port_status),
1416 RTE_PORT_STOPPED) == 0)
1417 printf("Port %d can not be set back "
1418 "to stopped\n", pi);
1419 printf("Fail to configure port %d tx queues\n", pi);
1420 /* try to reconfigure queues next time */
1421 port->need_reconfig_queues = 1;
1424 /* setup rx queues */
1425 for (qi = 0; qi < nb_rxq; qi++) {
/* RX queue mempool must exist on the requested NUMA socket. */
1426 if ((numa_support) &&
1427 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1428 struct rte_mempool * mp =
1429 mbuf_pool_find(rxring_numa[pi]);
1431 printf("Failed to setup RX queue:"
1432 "No mempool allocation"
1433 " on the socket %d\n",
1438 diag = rte_eth_rx_queue_setup(pi, qi,
1439 nb_rxd,rxring_numa[pi],
1440 &(port->rx_conf),mp);
1442 struct rte_mempool *mp =
1443 mbuf_pool_find(port->socket_id);
1445 printf("Failed to setup RX queue:"
1446 "No mempool allocation"
1447 " on the socket %d\n",
1451 diag = rte_eth_rx_queue_setup(pi, qi,
1452 nb_rxd,port->socket_id,
1453 &(port->rx_conf), mp);
1458 /* Fail to setup rx queue, return */
1459 if (rte_atomic16_cmpset(&(port->port_status),
1461 RTE_PORT_STOPPED) == 0)
1462 printf("Port %d can not be set back "
1463 "to stopped\n", pi);
1464 printf("Fail to configure port %d rx queues\n", pi);
1465 /* try to reconfigure queues next time */
1466 port->need_reconfig_queues = 1;
/* Register a callback for every known ethdev event type. */
1471 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1472 event_type < RTE_ETH_EVENT_MAX;
1474 diag = rte_eth_dev_callback_register(pi,
1479 printf("Failed to setup even callback for event %d\n",
/* start port */
1486 if (rte_eth_dev_start(pi) < 0) {
1487 printf("Fail to start port %d\n", pi);
1489 /* Fail to setup rx queue, return */
1490 if (rte_atomic16_cmpset(&(port->port_status),
1491 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1492 printf("Port %d can not be set back to "
1497 if (rte_atomic16_cmpset(&(port->port_status),
1498 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1499 printf("Port %d can not be set into started\n", pi);
1501 rte_eth_macaddr_get(pi, &mac_addr);
1502 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1503 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1504 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1505 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1507 /* at least one port started, need checking link status */
1508 need_check_link_status = 1;
/* -1 here means the loop body never ran for any port. */
1511 if (need_check_link_status == 1 && !no_link_check)
1512 check_all_ports_link_status(RTE_PORT_ALL);
1513 else if (need_check_link_status == 0)
1514 printf("Please stop the ports first\n");
/*
 * stop_port() - stop port @pid, or every probed port when
 * pid == RTE_PORT_ALL.  A port is skipped when it is still part of the
 * forwarding configuration or is a bonding slave.  State goes
 * STARTED -> HANDLING -> STOPPED via rte_atomic16_cmpset.
 * NOTE(review): interior lines are missing from this extract; kept
 * byte-identical.
 */
1521 stop_port(portid_t pid)
1524 struct rte_port *port;
1525 int need_check_link_status = 0;
1532 if (port_id_is_invalid(pid, ENABLED_WARN))
1535 printf("Stopping ports...\n");
1537 RTE_ETH_FOREACH_DEV(pi) {
1538 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
/* Refuse to stop a port still used by an active forwarding session. */
1541 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1542 printf("Please remove port %d from forwarding configuration.\n", pi);
1546 if (port_is_bonding_slave(pi)) {
1547 printf("Please remove port %d from bonded device.\n", pi);
1552 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1553 RTE_PORT_HANDLING) == 0)
1556 rte_eth_dev_stop(pi);
1558 if (rte_atomic16_cmpset(&(port->port_status),
1559 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1560 printf("Port %d can not be set into stopped\n", pi);
1561 need_check_link_status = 1;
1563 if (need_check_link_status && !no_link_check)
1564 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * close_port() - close port @pid, or every probed port when
 * pid == RTE_PORT_ALL.  Skips ports still forwarding or acting as
 * bonding slaves, flushes any flow rules, then calls rte_eth_dev_close().
 * State goes STOPPED -> HANDLING -> CLOSED via rte_atomic16_cmpset.
 * NOTE(review): interior lines are missing from this extract; kept
 * byte-identical.
 */
1570 close_port(portid_t pid)
1573 struct rte_port *port;
1575 if (port_id_is_invalid(pid, ENABLED_WARN))
1578 printf("Closing ports...\n");
1580 RTE_ETH_FOREACH_DEV(pi) {
1581 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1584 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1585 printf("Please remove port %d from forwarding configuration.\n", pi);
1589 if (port_is_bonding_slave(pi)) {
1590 printf("Please remove port %d from bonded device.\n", pi);
/* cmpset(CLOSED, CLOSED) == 1 is used as an "already closed" probe. */
1595 if (rte_atomic16_cmpset(&(port->port_status),
1596 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1597 printf("Port %d is already closed\n", pi);
1601 if (rte_atomic16_cmpset(&(port->port_status),
1602 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1603 printf("Port %d is now not stopped\n", pi);
/* Flush rte_flow rules before the device is closed. */
1607 if (port->flow_list)
1608 port_flow_flush(pi);
1609 rte_eth_dev_close(pi);
1611 if (rte_atomic16_cmpset(&(port->port_status),
1612 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1613 printf("Port %d cannot be set to closed\n", pi);
/*
 * attach_port() - hot-plug a new device identified by @identifier
 * (PCI address or vdev name), reconfigure it on its NUMA socket,
 * enable promiscuous mode and mark it stopped.  Updates the global
 * nb_ports count.
 * NOTE(review): interior lines are missing from this extract; kept
 * byte-identical.
 */
1620 attach_port(char *identifier)
1623 unsigned int socket_id;
1625 printf("Attaching a new port...\n");
1627 if (identifier == NULL) {
1628 printf("Invalid parameters are specified\n");
1632 if (rte_eth_dev_attach(identifier, &pi))
1635 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1636 /* if socket_id is invalid, set to 0 */
1637 if (check_socket_id(socket_id) < 0)
1639 reconfig(pi, socket_id);
1640 rte_eth_promiscuous_enable(pi);
1642 nb_ports = rte_eth_dev_count();
1644 ports[pi].port_status = RTE_PORT_STOPPED;
1646 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * detach_port() - hot-unplug port @port_id.  The port must already be
 * closed; any remaining flow rules are flushed first.  Updates the
 * global nb_ports count.
 * NOTE(review): interior lines are missing from this extract; kept
 * byte-identical.
 */
1651 detach_port(uint8_t port_id)
1653 char name[RTE_ETH_NAME_MAX_LEN];
1655 printf("Detaching a port...\n");
1657 if (!port_is_closed(port_id)) {
1658 printf("Please close port first\n");
1662 if (ports[port_id].flow_list)
1663 port_flow_flush(port_id);
/* rte_eth_dev_detach() fills @name with the detached device name. */
1665 if (rte_eth_dev_detach(port_id, name))
1668 nb_ports = rte_eth_dev_count();
1670 printf("Port '%s' is detached. Now total ports is %d\n",
/*
 * NOTE(review): fragment of the application shutdown path (function
 * header is missing from this extract): stops forwarding, then shuts
 * down each probed port before exiting.
 */
1682 stop_packet_forwarding();
1684 if (ports != NULL) {
1686 RTE_ETH_FOREACH_DEV(pt_id) {
1687 printf("\nShutting down port %d...\n", pt_id);
1693 printf("\nBye...\n");
/* Entry type for the non-interactive PMD test menu: a command name
 * mapped to the handler invoked when the command is selected. */
1696 typedef void (*cmd_func_t)(void);
1697 struct pmd_test_command {
1698 const char *cmd_name;
1699 cmd_func_t cmd_func;
/* Number of entries in the (not visible here) pmd_test_menu[] table. */
1702 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1704 /* Check the link status of all ports in up to 9s, and print them finally */
1706 check_all_ports_link_status(uint32_t port_mask)
1708 #define CHECK_INTERVAL 100 /* 100ms */
1709 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1710 uint8_t portid, count, all_ports_up, print_flag = 0;
1711 struct rte_eth_link link;
1713 printf("Checking link statuses...\n");
1715 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1717 RTE_ETH_FOREACH_DEV(portid) {
1718 if ((port_mask & (1 << portid)) == 0)
1720 memset(&link, 0, sizeof(link));
1721 rte_eth_link_get_nowait(portid, &link);
1722 /* print link status if flag set */
1723 if (print_flag == 1) {
1724 if (link.link_status)
1725 printf("Port %d Link Up - speed %u "
1726 "Mbps - %s\n", (uint8_t)portid,
1727 (unsigned)link.link_speed,
1728 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1729 ("full-duplex") : ("half-duplex\n"));
1731 printf("Port %d Link Down\n",
1735 /* clear all_ports_up flag if any link down */
1736 if (link.link_status == ETH_LINK_DOWN) {
1741 /* after finally printing all link status, get out */
1742 if (print_flag == 1)
1745 if (all_ports_up == 0) {
1747 rte_delay_ms(CHECK_INTERVAL);
1750 /* set the print_flag if all ports up or timeout */
1751 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1760 /* This function is used by the interrupt thread */
/*
 * Ethdev event callback registered in start_port(): logs the event type
 * for @port_id using a human-readable name from event_desc[].  Events at
 * or beyond RTE_ETH_EVENT_MAX are rejected with a message on stderr.
 * NOTE(review): interior lines are missing from this extract; kept
 * byte-identical.
 */
1762 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
1764 static const char * const event_desc[] = {
1765 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1766 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1767 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1768 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1769 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1770 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1771 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1772 [RTE_ETH_EVENT_MAX] = NULL,
1775 RTE_SET_USED(param);
1777 if (type >= RTE_ETH_EVENT_MAX) {
1778 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1779 port_id, __func__, type);
1782 printf("\nPort %" PRIu8 ": %s event\n", port_id,
/*
 * Program the per-queue TX stats counter mappings that apply to
 * @port_id, from the global tx_queue_stats_mappings[] table, and mark
 * the port's tx_queue_stats_mapping_enabled flag when a mapping exists.
 * NOTE(review): interior lines (declarations, error handling, return)
 * are missing from this extract; kept byte-identical.
 */
1789 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1793 uint8_t mapping_found = 0;
1795 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
/* Only entries for this port with an in-range queue id are applied. */
1796 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1797 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1798 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1799 tx_queue_stats_mappings[i].queue_id,
1800 tx_queue_stats_mappings[i].stats_counter_id);
1807 port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX-side counterpart of set_tx_queue_stats_mapping_registers():
 * programs per-queue RX stats counter mappings for @port_id from the
 * global rx_queue_stats_mappings[] table.
 * NOTE(review): interior lines (declarations, error handling, return)
 * are missing from this extract; kept byte-identical.
 */
1812 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1816 uint8_t mapping_found = 0;
1818 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1819 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1820 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1821 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1822 rx_queue_stats_mappings[i].queue_id,
1823 rx_queue_stats_mappings[i].stats_counter_id);
1830 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue-stats mappings to port @pi.  -ENOTSUP from
 * the driver is tolerated (the corresponding enabled-flag is cleared);
 * any other error aborts testpmd via rte_exit().
 * NOTE(review): interior lines are missing from this extract; kept
 * byte-identical.
 */
1835 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1839 diag = set_tx_queue_stats_mapping_registers(pi, port);
1841 if (diag == -ENOTSUP) {
1842 port->tx_queue_stats_mapping_enabled = 0;
1843 printf("TX queue stats mapping not supported port id=%d\n", pi);
1846 rte_exit(EXIT_FAILURE,
1847 "set_tx_queue_stats_mapping_registers "
1848 "failed for port id=%d diag=%d\n",
1852 diag = set_rx_queue_stats_mapping_registers(pi, port);
1854 if (diag == -ENOTSUP) {
1855 port->rx_queue_stats_mapping_enabled = 0;
1856 printf("RX queue stats mapping not supported port id=%d\n", pi);
1859 rte_exit(EXIT_FAILURE,
1860 "set_rx_queue_stats_mapping_registers "
1861 "failed for port id=%d diag=%d\n",
1867 rxtx_port_config(struct rte_port *port)
1869 port->rx_conf = port->dev_info.default_rxconf;
1870 port->tx_conf = port->dev_info.default_txconf;
1872 /* Check if any RX/TX parameters have been passed */
1873 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1874 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1876 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1877 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1879 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1880 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1882 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1883 port->rx_conf.rx_free_thresh = rx_free_thresh;
1885 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1886 port->rx_conf.rx_drop_en = rx_drop_en;
1888 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1889 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1891 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1892 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1894 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1895 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1897 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1898 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1900 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1901 port->tx_conf.tx_free_thresh = tx_free_thresh;
1903 if (txq_flags != RTE_PMD_PARAM_UNSET)
1904 port->tx_conf.txq_flags = txq_flags;
/*
 * init_port_config() - build the default rte_eth_conf for every probed
 * port: RX mode, flow-director config, RSS hash (when multiple RX queues
 * are used), queue thresholds via rxtx_port_config(), MAC address, stats
 * mappings, and the LSC interrupt flag when supported by the device.
 * NOTE(review): interior lines (the nb_rxq>1 if/else around the rss_conf
 * assignments, loop braces) are missing from this extract; kept
 * byte-identical.
 */
1908 init_port_config(void)
1911 struct rte_port *port;
1913 RTE_ETH_FOREACH_DEV(pid) {
1915 port->dev_conf.rxmode = rx_mode;
1916 port->dev_conf.fdir_conf = fdir_conf;
/* First branch: enable RSS with the configured hash functions... */
1918 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1919 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
/* ...second branch: no RSS (rss_hf forced to 0). */
1921 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1922 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1925 if (port->dcb_flag == 0) {
1926 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1927 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1929 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1932 rxtx_port_config(port);
1934 rte_eth_macaddr_get(pid, &port->eth_addr);
1936 map_port_queue_stats_mapping_registers(pid, port);
1937 #ifdef RTE_NIC_BYPASS
1938 rte_eth_dev_bypass_init(pid);
/* Enable link-status-change interrupts only when the device supports them. */
1941 if (lsc_interrupt &&
1942 (rte_eth_devices[pid].data->dev_flags &
1943 RTE_ETH_DEV_INTR_LSC))
1944 port->dev_conf.intr_conf.lsc = 1;
1948 void set_port_slave_flag(portid_t slave_pid)
1950 struct rte_port *port;
1952 port = &ports[slave_pid];
1953 port->slave_flag = 1;
1956 void clear_port_slave_flag(portid_t slave_pid)
1958 struct rte_port *port;
1960 port = &ports[slave_pid];
1961 port->slave_flag = 0;
1964 uint8_t port_is_bonding_slave(portid_t slave_pid)
1966 struct rte_port *port;
1968 port = &ports[slave_pid];
1969 return port->slave_flag;
/* VLAN IDs 0..31, used below by get_eth_dcb_conf() to populate the
 * VMDQ+DCB pool map and by init_port_dcb_config() to set VLAN filters. */
const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
/*
 * get_eth_dcb_conf() - fill @eth_conf for DCB operation.  In
 * DCB_VT_ENABLED mode it configures VMDQ+DCB pools mapped from
 * vlan_tags[]; otherwise plain DCB with RSS on RX.  Capability flags
 * (PG / PFC) are set based on the pfc_en parameter.
 * NOTE(review): interior lines are missing from this extract; also the
 * "ð_conf" tokens below appear to be a mis-encoded "&eth_conf"
 * (HTML entity &eth; garbling) — confirm against upstream.  Kept
 * byte-identical.
 */
1980 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1981 enum dcb_mode_enable dcb_mode,
1982 enum rte_eth_nb_tcs num_tcs,
1988 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1989 * given above, and the number of traffic classes available for use.
1991 if (dcb_mode == DCB_VT_ENABLED) {
1992 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1993 ð_conf->rx_adv_conf.vmdq_dcb_conf;
1994 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1995 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1997 /* VMDQ+DCB RX and TX configurations */
1998 vmdq_rx_conf->enable_default_pool = 0;
1999 vmdq_rx_conf->default_pool = 0;
/* 4 TCs -> 32 pools, otherwise (8 TCs) -> 16 pools. */
2000 vmdq_rx_conf->nb_queue_pools =
2001 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2002 vmdq_tx_conf->nb_queue_pools =
2003 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2005 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2006 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2007 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2008 vmdq_rx_conf->pool_map[i].pools =
2009 1 << (i % vmdq_rx_conf->nb_queue_pools);
2011 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2012 vmdq_rx_conf->dcb_tc[i] = i;
2013 vmdq_tx_conf->dcb_tc[i] = i;
2016 /* set DCB mode of RX and TX of multiple queues */
2017 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2018 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
/* Non-VT branch: plain DCB, RSS enabled on the RX side. */
2020 struct rte_eth_dcb_rx_conf *rx_conf =
2021 ð_conf->rx_adv_conf.dcb_rx_conf;
2022 struct rte_eth_dcb_tx_conf *tx_conf =
2023 ð_conf->tx_adv_conf.dcb_tx_conf;
2025 rx_conf->nb_tcs = num_tcs;
2026 tx_conf->nb_tcs = num_tcs;
2028 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2029 rx_conf->dcb_tc[i] = i % num_tcs;
2030 tx_conf->dcb_tc[i] = i % num_tcs;
2032 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2033 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2034 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2038 eth_conf->dcb_capability_en =
2039 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2041 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * init_port_dcb_config() - switch port @pid into DCB mode: build the DCB
 * rte_eth_conf via get_eth_dcb_conf(), probe device capabilities, derive
 * nb_rxq/nb_txq from the VMDQ pool layout or traffic-class count, enable
 * VLAN filtering for all vlan_tags[], and flag the port with dcb_flag.
 * NOTE(review): interior lines (return checks, braces) are missing from
 * this extract; kept byte-identical.
 */
2047 init_port_dcb_config(portid_t pid,
2048 enum dcb_mode_enable dcb_mode,
2049 enum rte_eth_nb_tcs num_tcs,
2052 struct rte_eth_conf port_conf;
2053 struct rte_port *rte_port;
2057 rte_port = &ports[pid];
2059 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2060 /* Enter DCB configuration status */
2063 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2064 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2067 port_conf.rxmode.hw_vlan_filter = 1;
2070 * Write the configuration into the device.
2071 * Set the numbers of RX & TX queues to 0, so
2072 * the RX & TX queues will not be setup.
2074 (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2076 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2078 /* If dev_info.vmdq_pool_base is greater than 0,
2079 * the queue id of vmdq pools is started after pf queues.
2081 if (dcb_mode == DCB_VT_ENABLED &&
2082 rte_port->dev_info.vmdq_pool_base > 0) {
2083 printf("VMDQ_DCB multi-queue mode is nonsensical"
2084 " for port %d.", pid);
2088 /* Assume the ports in testpmd have the same dcb capability
2089 * and has the same number of rxq and txq in dcb mode
2091 if (dcb_mode == DCB_VT_ENABLED) {
2092 if (rte_port->dev_info.max_vfs > 0) {
/* With VFs present, the PF only owns the queues dev_info reports. */
2093 nb_rxq = rte_port->dev_info.nb_rx_queues;
2094 nb_txq = rte_port->dev_info.nb_tx_queues;
2096 nb_rxq = rte_port->dev_info.max_rx_queues;
2097 nb_txq = rte_port->dev_info.max_tx_queues;
2100 /*if vt is disabled, use all pf queues */
2101 if (rte_port->dev_info.vmdq_pool_base == 0) {
2102 nb_rxq = rte_port->dev_info.max_rx_queues;
2103 nb_txq = rte_port->dev_info.max_tx_queues;
2105 nb_rxq = (queueid_t)num_tcs;
2106 nb_txq = (queueid_t)num_tcs;
2110 rx_free_thresh = 64;
2112 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2114 rxtx_port_config(rte_port);
2116 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
/* Accept every VLAN id used by the VMDQ+DCB pool mapping. */
2117 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2118 rx_vft_set(pid, vlan_tags[i], 1);
2120 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2121 map_port_queue_stats_mapping_registers(pid, rte_port);
2123 rte_port->dcb_flag = 1;
/*
 * NOTE(review): fragment (function header missing from this extract) —
 * allocates the zero-initialised, cache-aligned global ports[] array for
 * all RTE_MAX_ETHPORTS entries, aborting testpmd on allocation failure.
 */
2131 /* Configuration of Ethernet ports. */
2132 ports = rte_zmalloc("testpmd: ports",
2133 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2134 RTE_CACHE_LINE_SIZE);
2135 if (ports == NULL) {
2136 rte_exit(EXIT_FAILURE,
2137 "rte_zmalloc(%d struct rte_port) failed\n",
/*
 * signal_handler() - SIGINT/SIGTERM handler: tears down optional pdump
 * and latency-stats subsystems, then restores the default disposition
 * and re-raises the signal so the process exits with the conventional
 * signal status.
 * NOTE(review): interior lines (#endif lines, the signum printf arg)
 * are missing from this extract; kept byte-identical.
 */
2150 signal_handler(int signum)
2152 if (signum == SIGINT || signum == SIGTERM) {
2153 printf("\nSignal %d received, preparing to exit...\n",
2155 #ifdef RTE_LIBRTE_PDUMP
2156 /* uninitialize packet capture framework */
2159 #ifdef RTE_LIBRTE_LATENCY_STATS
2160 rte_latencystats_uninit();
2163 /* exit with the expected status */
2164 signal(signum, SIG_DFL);
2165 kill(getpid(), signum);
2170 main(int argc, char** argv)
2175 signal(SIGINT, signal_handler);
2176 signal(SIGTERM, signal_handler);
2178 diag = rte_eal_init(argc, argv);
2180 rte_panic("Cannot init EAL\n");
2182 #ifdef RTE_LIBRTE_PDUMP
2183 /* initialize packet capture framework */
2184 rte_pdump_init(NULL);
2187 nb_ports = (portid_t) rte_eth_dev_count();
2189 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2191 /* allocate port structures, and init them */
2194 set_def_fwd_config();
2196 rte_panic("Empty set of forwarding logical cores - check the "
2197 "core mask supplied in the command parameters\n");
2202 launch_args_parse(argc, argv);
2204 if (!nb_rxq && !nb_txq)
2205 printf("Warning: Either rx or tx queues should be non-zero\n");
2207 if (nb_rxq > 1 && nb_rxq > nb_txq)
2208 printf("Warning: nb_rxq=%d enables RSS configuration, "
2209 "but nb_txq=%d will prevent to fully test it.\n",
2213 if (start_port(RTE_PORT_ALL) != 0)
2214 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2216 /* set all ports to promiscuous mode by default */
2217 RTE_ETH_FOREACH_DEV(port_id)
2218 rte_eth_promiscuous_enable(port_id);
2220 /* Init metrics library */
2221 rte_metrics_init(rte_socket_id());
2223 #ifdef RTE_LIBRTE_LATENCY_STATS
2224 if (latencystats_enabled != 0) {
2225 int ret = rte_latencystats_init(1, NULL);
2227 printf("Warning: latencystats init()"
2228 " returned error %d\n", ret);
2229 printf("Latencystats running on lcore %d\n",
2230 latencystats_lcore_id);
2234 /* Setup bitrate stats */
2235 #ifdef RTE_LIBRTE_BITRATE
2236 bitrate_data = rte_stats_bitrate_create();
2237 if (bitrate_data == NULL)
2238 rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
2239 rte_stats_bitrate_reg(bitrate_data);
2243 #ifdef RTE_LIBRTE_CMDLINE
2244 if (interactive == 1) {
2246 printf("Start automatic packet forwarding\n");
2247 start_packet_forwarding(0);
2256 printf("No commandline core given, start packet forwarding\n");
2257 start_packet_forwarding(0);
2258 printf("Press enter to exit\n");
2259 rc = read(0, &c, 1);