 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * * Neither the name of Intel Corporation nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/types.h>
#include <sys/queue.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif
uint16_t verbose_level = 0; /**< Silent by default. */
/* use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
uint8_t numa_support = 0; /**< No numa support by default */
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
uint8_t socket_num = UMA_NO_CONFIG;
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 * Record the Ethernet address of peer target ports to which packets are
 * Must be instantiated with the Ethernet addresses of peer traffic generator
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
 * Probed Target Environment.
struct rte_port *ports; /**< For all probed ethernet ports. */
portid_t nb_ports; /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores; /**< Number of probed logical cores. */
 * Test Forwarding Configuration.
 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t nb_cfg_ports; /**< Number of configured ports. */
portid_t nb_fwd_ports; /**< Number of forwarding ports. */
unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
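/*
 * Illustrative note (editorial, not part of the original source): the
 * invariants above mean that, for example, on a run with 5 enabled lcores
 * (the master lcore is excluded from forwarding) and 2 probed ports,
 * nb_lcores = 4 and nb_ports = 2; lowering the number of forwarding cores
 * from the testpmd CLI then reduces nb_fwd_lcores while still satisfying
 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores.
 */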
 * Forwarding engines.
struct fwd_engine * fwd_engines[] = {
#ifdef RTE_LIBRTE_IEEE1588
&ieee1588_fwd_engine,
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
 * specified on command-line. */
 * Configuration of packet segments used by the "txonly" processing engine.
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
TXONLY_DEF_PACKET_LEN,
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */
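/*
 * Illustrative example (editorial assumption): with the defaults above the
 * txonly engine sends single-segment packets of TXONLY_DEF_PACKET_LEN bytes.
 * A command such as "set txpkts 32,32" at the testpmd prompt would typically
 * set tx_pkt_nb_segs = 2 and tx_pkt_seg_lengths = {32, 32}, i.e. 64-byte
 * packets split over two mbuf segments.
 */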
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;
/* Whether DCB is in testing status */
uint8_t dcb_test = 0;
 * Configurable number of RX/TX queues.
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */
 * Configurable number of RX/TX ring descriptors.
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
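/*
 * Note (editorial): nb_rxd and nb_txd keep these defaults unless overridden
 * on the command line (testpmd's --rxd=N and --txd=N options, assumed here);
 * the values are later passed to rte_eth_rx_queue_setup() and
 * rte_eth_tx_queue_setup() when queues are configured in start_port().
 */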
#define RTE_PMD_PARAM_UNSET -1
 * Configurable values of RX and TX ring threshold registers.
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
 * Configurable value of RX free threshold.
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
 * Configurable value of RX drop enable.
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
 * Configurable value of TX free threshold.
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
 * Configurable value of TX RS bit threshold.
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
 * Configurable value of TX queue flags.
int32_t txq_flags = RTE_PMD_PARAM_UNSET;
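/*
 * Note (editorial): any of the values above left at RTE_PMD_PARAM_UNSET is
 * not forced on the device; rxtx_port_config() below starts from the PMD
 * defaults reported in dev_info.default_rxconf / default_txconf and only
 * overrides the fields that were explicitly set.
 */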
 * Receive Side Scaling (RSS) configuration.
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
 * Port topology configuration
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
 * Avoid flushing all the RX streams before starting forwarding.
uint8_t no_flush_rx = 0; /* flush by default */
 * Avoid checking the link status when starting/stopping a port.
uint8_t no_link_check = 0; /* check by default */
 * NIC bypass mode configuration options.
#ifdef RTE_NIC_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
 * Set when latency stats are enabled on the command line.
uint8_t latencystats_enabled;
 * Lcore ID to serve latency statistics.
lcoreid_t latencystats_lcore_id = -1;
#endif
 * Ethernet device configuration.
struct rte_eth_rxmode rx_mode = {
.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
.header_split = 0, /**< Header Split disabled. */
.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
.hw_vlan_strip = 1, /**< VLAN strip enabled. */
.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
.hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
struct rte_fdir_conf fdir_conf = {
.mode = RTE_FDIR_MODE_NONE,
.pballoc = RTE_FDIR_PBALLOC_64K,
.status = RTE_FDIR_REPORT_STATUS,
.vlan_tci_mask = 0x0,
.src_ip = 0xFFFFFFFF,
.dst_ip = 0xFFFFFFFF,
.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
.src_port_mask = 0xFFFF,
.dst_port_mask = 0xFFFF,
.mac_addr_byte_mask = 0xFF,
.tunnel_type_mask = 1,
.tunnel_id_mask = 0xFFFFFFFF,
volatile int test_done = 1; /* stop packet forwarding when set to 1. */
struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;
unsigned max_socket = 0;
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
static int all_ports_started(void);
 * Find next enabled port
find_next_port(portid_t p, struct rte_port *ports, int size)
rte_exit(-EINVAL, "failed to find a next port id\n");
while ((p < size) && (ports[p].enabled == 0))
 * Setup default configuration.
set_default_fwd_lcores_config(void)
unsigned int sock_num;
for (i = 0; i < RTE_MAX_LCORE; i++) {
sock_num = rte_lcore_to_socket_id(i) + 1;
if (sock_num > max_socket) {
if (sock_num > RTE_MAX_NUMA_NODES)
rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
max_socket = sock_num;
if (!rte_lcore_is_enabled(i))
if (i == rte_get_master_lcore())
fwd_lcores_cpuids[nb_lc++] = i;
nb_lcores = (lcoreid_t) nb_lc;
nb_cfg_lcores = nb_lcores;
set_def_peer_eth_addrs(void)
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
peer_eth_addrs[i].addr_bytes[5] = i;
set_default_fwd_ports_config(void)
for (pt_id = 0; pt_id < nb_ports; pt_id++)
fwd_ports_ids[pt_id] = pt_id;
nb_cfg_ports = nb_ports;
nb_fwd_ports = nb_ports;
set_def_fwd_config(void)
set_default_fwd_lcores_config();
set_def_peer_eth_addrs();
set_default_fwd_ports_config();
 * Configuration initialisation done once at init time.
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
unsigned int socket_id)
char pool_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *rte_mp = NULL;
mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
pool_name, nb_mbuf, mbuf_seg_size, socket_id);
#ifdef RTE_LIBRTE_PMD_XENVIRT
rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
(unsigned) mb_mempool_cache,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL,
/* if the former XEN allocation failed, fall back to normal allocation */
if (rte_mp == NULL) {
rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
mb_size, (unsigned) mb_mempool_cache,
sizeof(struct rte_pktmbuf_pool_private),
if (rte_mempool_populate_anon(rte_mp) == 0) {
rte_mempool_free(rte_mp);
rte_pktmbuf_pool_init(rte_mp, NULL);
rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
/* wrapper to rte_mempool_create() */
rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
mb_mempool_cache, 0, mbuf_seg_size, socket_id);
if (rte_mp == NULL) {
rte_exit(EXIT_FAILURE,
"Creation of mbuf pool for socket %u failed: %s\n",
socket_id, rte_strerror(rte_errno));
} else if (verbose_level > 0) {
rte_mempool_dump(stdout, rte_mp);
 * Check whether the given socket id is valid in NUMA mode;
 * if valid, return 0, else return -1.
check_socket_id(const unsigned int socket_id)
static int warning_once = 0;
if (socket_id >= max_socket) {
if (!warning_once && numa_support)
printf("Warning: NUMA should be configured manually by"
" using --port-numa-config and"
" --ring-numa-config parameters along with"
struct rte_port *port;
struct rte_mempool *mbp;
unsigned int nb_mbuf_per_pool;
uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
/* Configuration of logical cores. */
fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
sizeof(struct fwd_lcore *) * nb_lcores,
RTE_CACHE_LINE_SIZE);
if (fwd_lcores == NULL) {
rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
"failed\n", nb_lcores);
for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
sizeof(struct fwd_lcore),
RTE_CACHE_LINE_SIZE);
if (fwd_lcores[lc_id] == NULL) {
rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
fwd_lcores[lc_id]->cpuid_idx = lc_id;
 * Create pools of mbuf.
 * If NUMA support is disabled, create a single pool of mbuf in
 * socket 0 memory by default.
 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
 * nb_txd can be configured at run time.
if (param_total_num_mbufs)
nb_mbuf_per_pool = param_total_num_mbufs;
nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
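/*
 * Worked example (editorial; the constants live in testpmd.h and are only
 * assumed here): with RTE_TEST_RX_DESC_MAX = 2048, RTE_TEST_TX_DESC_MAX =
 * 2048, MAX_PKT_BURST = 512, mb_mempool_cache = 250 and 4 forwarding lcores,
 * nb_mbuf_per_pool = 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs per port;
 * when a single pool is shared by all ports it is then scaled by
 * RTE_MAX_ETHPORTS.
 */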
if (socket_num == UMA_NO_CONFIG)
mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
FOREACH_PORT(pid, ports) {
rte_eth_dev_info_get(pid, &port->dev_info);
if (port_numa[pid] != NUMA_NO_CONFIG)
port_per_socket[port_numa[pid]]++;
uint32_t socket_id = rte_eth_dev_socket_id(pid);
/* if socket_id is invalid, set to 0 */
if (check_socket_id(socket_id) < 0)
port_per_socket[socket_id]++;
/* set flag to initialize port/queue */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
unsigned int nb_mbuf;
if (param_total_num_mbufs)
nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
for (i = 0; i < max_socket; i++) {
nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
mbuf_pool_create(mbuf_data_size,
 * Record which mbuf pool each logical core uses, if needed.
for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
mbp = mbuf_pool_find(
rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
mbp = mbuf_pool_find(0);
fwd_lcores[lc_id]->mbp = mbp;
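/*
 * Note (editorial): each forwarding lcore is bound to the mbuf pool of its
 * own CPU socket when one exists; if mbuf_pool_find() returns no pool for
 * that socket, the lcore falls back to the pool created on socket 0.
 */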
/* Configuration of packet forwarding streams. */
if (init_fwd_streams() < 0)
rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
reconfig(portid_t new_port_id, unsigned socket_id)
struct rte_port *port;
/* Reconfiguration of Ethernet ports. */
port = &ports[new_port_id];
rte_eth_dev_info_get(new_port_id, &port->dev_info);
/* set flag to initialize port/queue */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
port->socket_id = socket_id;
init_fwd_streams(void)
struct rte_port *port;
streamid_t sm_id, nb_fwd_streams_new;
/* set socket id according to numa or not */
FOREACH_PORT(pid, ports) {
if (nb_rxq > port->dev_info.max_rx_queues) {
printf("Fail: nb_rxq(%d) is greater than "
"max_rx_queues(%d)\n", nb_rxq,
port->dev_info.max_rx_queues);
if (nb_txq > port->dev_info.max_tx_queues) {
printf("Fail: nb_txq(%d) is greater than "
"max_tx_queues(%d)\n", nb_txq,
port->dev_info.max_tx_queues);
if (port_numa[pid] != NUMA_NO_CONFIG)
port->socket_id = port_numa[pid];
port->socket_id = rte_eth_dev_socket_id(pid);
/* if socket_id is invalid, set to 0 */
if (check_socket_id(port->socket_id) < 0)
if (socket_num == UMA_NO_CONFIG)
port->socket_id = socket_num;
q = RTE_MAX(nb_rxq, nb_txq);
printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
nb_fwd_streams_new = (streamid_t)(nb_ports * q);
if (nb_fwd_streams_new == nb_fwd_streams)
if (fwd_streams != NULL) {
for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
if (fwd_streams[sm_id] == NULL)
rte_free(fwd_streams[sm_id]);
fwd_streams[sm_id] = NULL;
rte_free(fwd_streams);
nb_fwd_streams = nb_fwd_streams_new;
fwd_streams = rte_zmalloc("testpmd: fwd_streams",
sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
if (fwd_streams == NULL)
rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
"failed\n", nb_fwd_streams);
for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
if (fwd_streams[sm_id] == NULL)
rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
unsigned int total_burst;
unsigned int nb_burst;
unsigned int burst_stats[3];
uint16_t pktnb_stats[3];
int burst_percent[3];
 * First compute the total number of packet bursts and the
 * two highest numbers of bursts of the same number of packets.
burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
nb_burst = pbs->pkt_burst_spread[nb_pkt];
total_burst += nb_burst;
if (nb_burst > burst_stats[0]) {
burst_stats[1] = burst_stats[0];
pktnb_stats[1] = pktnb_stats[0];
burst_stats[0] = nb_burst;
pktnb_stats[0] = nb_pkt;
if (total_burst == 0)
burst_percent[0] = (burst_stats[0] * 100) / total_burst;
printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
burst_percent[0], (int) pktnb_stats[0]);
if (burst_stats[0] == total_burst) {
if (burst_stats[0] + burst_stats[1] == total_burst) {
printf(" + %d%% of %d pkts]\n",
100 - burst_percent[0], pktnb_stats[1]);
burst_percent[1] = (burst_stats[1] * 100) / total_burst;
burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
printf(" + %d%% of others]\n", 100 - burst_percent[0]);
printf(" + %d%% of %d pkts + %d%% of others]\n",
burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
struct rte_port *port;
static const char *fwd_stats_border = "----------------------";
port = &ports[port_id];
printf("\n %s Forward statistics for port %-2d %s\n",
fwd_stats_border, port_id, fwd_stats_border);
if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
stats->ipackets, stats->imissed,
(uint64_t) (stats->ipackets + stats->imissed));
if (cur_fwd_eng == &csum_fwd_engine)
printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
port->rx_bad_ip_csum, port->rx_bad_l4_csum);
if ((stats->ierrors + stats->rx_nombuf) > 0) {
printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
stats->opackets, port->tx_dropped,
(uint64_t) (stats->opackets + port->tx_dropped));
printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
stats->ipackets, stats->imissed,
(uint64_t) (stats->ipackets + stats->imissed));
if (cur_fwd_eng == &csum_fwd_engine)
printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
port->rx_bad_ip_csum, port->rx_bad_l4_csum);
if ((stats->ierrors + stats->rx_nombuf) > 0) {
printf(" RX-error:%"PRIu64"\n", stats->ierrors);
printf(" RX-nombufs: %14"PRIu64"\n",
printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
stats->opackets, port->tx_dropped,
(uint64_t) (stats->opackets + port->tx_dropped));
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
pkt_burst_stats_display("RX",
&port->rx_stream->rx_burst_stats);
pkt_burst_stats_display("TX",
&port->tx_stream->tx_burst_stats);
if (port->rx_queue_stats_mapping_enabled) {
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
printf(" Stats reg %2d RX-packets:%14"PRIu64
" RX-errors:%14"PRIu64
" RX-bytes:%14"PRIu64"\n",
i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
if (port->tx_queue_stats_mapping_enabled) {
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
printf(" Stats reg %2d TX-packets:%14"PRIu64
" TX-bytes:%14"PRIu64"\n",
i, stats->q_opackets[i], stats->q_obytes[i]);
printf(" %s--------------------------------%s\n",
fwd_stats_border, fwd_stats_border);
fwd_stream_stats_display(streamid_t stream_id)
struct fwd_stream *fs;
static const char *fwd_top_stats_border = "-------";
fs = fwd_streams[stream_id];
if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
(fs->fwd_dropped == 0))
printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
"TX Port=%2d/Queue=%2d %s\n",
fwd_top_stats_border, fs->rx_port, fs->rx_queue,
fs->tx_port, fs->tx_queue, fwd_top_stats_border);
printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
/* if checksum mode */
if (cur_fwd_eng == &csum_fwd_engine) {
printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
pkt_burst_stats_display("RX", &fs->rx_burst_stats);
pkt_burst_stats_display("TX", &fs->tx_burst_stats);
flush_fwd_rx_queues(void)
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
uint64_t timer_period;
/* convert to number of cycles */
timer_period = rte_get_timer_hz(); /* 1 second timeout */
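/*
 * Note (editorial): the queues are drained in two passes with a 10 ms pause
 * in between, so packets still in flight when the first pass ends are caught
 * by the second one; the 1-second timer bounds each drain loop in case a
 * peer keeps transmitting.
 */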
for (j = 0; j < 2; j++) {
for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
for (rxq = 0; rxq < nb_rxq; rxq++) {
port_id = fwd_ports_ids[rxp];
 * testpmd can get stuck in the do/while loop below if
 * rte_eth_rx_burst() always returns non-zero packets,
 * so a 1-second timer is used to break out of the loop
 * once it expires.
prev_tsc = rte_rdtsc();
nb_rx = rte_eth_rx_burst(port_id, rxq,
pkts_burst, MAX_PKT_BURST);
for (i = 0; i < nb_rx; i++)
rte_pktmbuf_free(pkts_burst[i]);
cur_tsc = rte_rdtsc();
diff_tsc = cur_tsc - prev_tsc;
timer_tsc += diff_tsc;
} while ((nb_rx > 0) &&
(timer_tsc < timer_period));
rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
struct fwd_stream **fsm;
#ifdef RTE_LIBRTE_BITRATE
uint64_t tics_per_1sec;
uint64_t tics_current;
uint8_t idx_port, cnt_ports;
cnt_ports = rte_eth_dev_count();
tics_datum = rte_rdtsc();
tics_per_1sec = rte_get_timer_hz();
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
for (sm_id = 0; sm_id < nb_fs; sm_id++)
(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
tics_current = rte_rdtsc();
if (tics_current - tics_datum >= tics_per_1sec) {
/* Periodic bitrate calculation */
for (idx_port = 0; idx_port < cnt_ports; idx_port++)
rte_stats_bitrate_calc(bitrate_data, idx_port);
tics_datum = tics_current;
#ifdef RTE_LIBRTE_LATENCY_STATS
if (latencystats_lcore_id == rte_lcore_id())
rte_latencystats_update();
} while (! fc->stopped);
start_pkt_forward_on_core(void *fwd_arg)
run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
cur_fwd_config.fwd_eng->packet_fwd);
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
run_one_txonly_burst_on_core(void *fwd_arg)
struct fwd_lcore *fwd_lc;
struct fwd_lcore tmp_lcore;
fwd_lc = (struct fwd_lcore *) fwd_arg;
tmp_lcore = *fwd_lc;
tmp_lcore.stopped = 1;
run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
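/*
 * Note (editorial): forwarding runs on a local copy of the lcore context
 * with "stopped" already set to 1, so the do/while loop in
 * run_pkt_fwd_on_lcore() executes exactly one iteration and each forwarding
 * stream transmits a single burst.
 */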
 * Launch packet forwarding:
 * - Setup per-port forwarding context.
 * - launch logical cores with their forwarding configuration.
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
port_fwd_begin_t port_fwd_begin;
port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
if (port_fwd_begin != NULL) {
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
(*port_fwd_begin)(fwd_ports_ids[i]);
for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
lc_id = fwd_lcores_cpuids[i];
if ((interactive == 0) || (lc_id != rte_lcore_id())) {
fwd_lcores[i]->stopped = 0;
diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
fwd_lcores[i], lc_id);
printf("launch lcore %u failed - diag=%d\n",
 * Launch packet forwarding configuration.
start_packet_forwarding(int with_tx_first)
port_fwd_begin_t port_fwd_begin;
port_fwd_end_t port_fwd_end;
struct rte_port *port;
if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
(!nb_rxq || !nb_txq))
rte_exit(EXIT_FAILURE,
"Either rxq or txq are 0, cannot use %s fwd mode\n",
cur_fwd_eng->fwd_mode_name);
if (all_ports_started() == 0) {
printf("Not all ports were started\n");
if (test_done == 0) {
printf("Packet forwarding already started\n");
if (init_fwd_streams() < 0) {
printf("Fail from init_fwd_streams()\n");
for (i = 0; i < nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
port = &ports[pt_id];
if (!port->dcb_flag) {
printf("In DCB mode, all forwarding ports must "
"be configured in this mode.\n");
if (nb_fwd_lcores == 1) {
printf("In DCB mode, the number of forwarding cores "
"should be larger than 1.\n");
flush_fwd_rx_queues();
pkt_fwd_config_display(&cur_fwd_config);
rxtx_config_display();
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
port = &ports[pt_id];
rte_eth_stats_get(pt_id, &port->stats);
port->tx_dropped = 0;
map_port_queue_stats_mapping_registers(pt_id, port);
for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
fwd_streams[sm_id]->rx_packets = 0;
fwd_streams[sm_id]->tx_packets = 0;
fwd_streams[sm_id]->fwd_dropped = 0;
fwd_streams[sm_id]->rx_bad_ip_csum = 0;
fwd_streams[sm_id]->rx_bad_l4_csum = 0;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
sizeof(fwd_streams[sm_id]->rx_burst_stats));
memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
sizeof(fwd_streams[sm_id]->tx_burst_stats));
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
fwd_streams[sm_id]->core_cycles = 0;
if (with_tx_first) {
port_fwd_begin = tx_only_engine.port_fwd_begin;
if (port_fwd_begin != NULL) {
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
(*port_fwd_begin)(fwd_ports_ids[i]);
while (with_tx_first--) {
launch_packet_forwarding(
run_one_txonly_burst_on_core);
rte_eal_mp_wait_lcore();
port_fwd_end = tx_only_engine.port_fwd_end;
if (port_fwd_end != NULL) {
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
(*port_fwd_end)(fwd_ports_ids[i]);
launch_packet_forwarding(start_pkt_forward_on_core);
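/*
 * Note (editorial): when with_tx_first is non-zero (e.g. "start tx_first"
 * in the testpmd CLI, assumed here), the txonly engine is launched and
 * joined that many times to prime the loopback with initial bursts, and
 * only then is the configured forwarding engine started on all forwarding
 * lcores.
 */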
stop_packet_forwarding(void)
struct rte_eth_stats stats;
struct rte_port *port;
port_fwd_end_t port_fwd_end;
uint64_t total_recv;
uint64_t total_xmit;
uint64_t total_rx_dropped;
uint64_t total_tx_dropped;
uint64_t total_rx_nombuf;
uint64_t tx_dropped;
uint64_t rx_bad_ip_csum;
uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t fwd_cycles;
static const char *acc_stats_border = "+++++++++++++++";
printf("Packet forwarding not started\n");
printf("Telling cores to stop...");
for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
fwd_lcores[lc_id]->stopped = 1;
printf("\nWaiting for lcores to finish...\n");
rte_eal_mp_wait_lcore();
port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
if (port_fwd_end != NULL) {
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
(*port_fwd_end)(pt_id);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
if (cur_fwd_config.nb_fwd_streams >
cur_fwd_config.nb_fwd_ports) {
fwd_stream_stats_display(sm_id);
ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
ports[fwd_streams[sm_id]->tx_port].tx_stream =
ports[fwd_streams[sm_id]->rx_port].rx_stream =
tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
tx_dropped = (uint64_t) (tx_dropped +
fwd_streams[sm_id]->fwd_dropped);
ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
fwd_streams[sm_id]->rx_bad_ip_csum);
ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
fwd_streams[sm_id]->rx_bad_l4_csum);
ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
fwd_cycles = (uint64_t) (fwd_cycles +
fwd_streams[sm_id]->core_cycles);
total_rx_dropped = 0;
total_tx_dropped = 0;
total_rx_nombuf = 0;
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
port = &ports[pt_id];
rte_eth_stats_get(pt_id, &stats);
stats.ipackets -= port->stats.ipackets;
port->stats.ipackets = 0;
stats.opackets -= port->stats.opackets;
port->stats.opackets = 0;
stats.ibytes -= port->stats.ibytes;
port->stats.ibytes = 0;
stats.obytes -= port->stats.obytes;
port->stats.obytes = 0;
stats.imissed -= port->stats.imissed;
port->stats.imissed = 0;
stats.oerrors -= port->stats.oerrors;
port->stats.oerrors = 0;
stats.rx_nombuf -= port->stats.rx_nombuf;
port->stats.rx_nombuf = 0;
total_recv += stats.ipackets;
total_xmit += stats.opackets;
total_rx_dropped += stats.imissed;
total_tx_dropped += port->tx_dropped;
total_rx_nombuf += stats.rx_nombuf;
fwd_port_stats_display(pt_id, &stats);
printf("\n %s Accumulated forward statistics for all ports"
acc_stats_border, acc_stats_border);
printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
total_recv, total_rx_dropped, total_recv + total_rx_dropped,
total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
if (total_rx_nombuf > 0)
printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
printf("\n CPU cycles/packet=%u (total cycles="
"%"PRIu64" / total RX packets=%"PRIu64")\n",
(unsigned int)(fwd_cycles / total_recv),
fwd_cycles, total_recv);
printf("\nDone.\n");
dev_set_link_up(portid_t pid)
if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
printf("\nSet link up fail.\n");
dev_set_link_down(portid_t pid)
if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
printf("\nSet link down fail.\n");
all_ports_started(void)
struct rte_port *port;
FOREACH_PORT(pi, ports) {
/* Check if there is a port which is not started */
if ((port->port_status != RTE_PORT_STARTED) &&
(port->slave_flag == 0))
/* All ports are started */
all_ports_stopped(void)
struct rte_port *port;
FOREACH_PORT(pi, ports) {
if ((port->port_status != RTE_PORT_STOPPED) &&
(port->slave_flag == 0))
port_is_started(portid_t port_id)
if (port_id_is_invalid(port_id, ENABLED_WARN))
if (ports[port_id].port_status != RTE_PORT_STARTED)
port_is_closed(portid_t port_id)
if (port_id_is_invalid(port_id, ENABLED_WARN))
if (ports[port_id].port_status != RTE_PORT_CLOSED)
start_port(portid_t pid)
int diag, need_check_link_status = -1;
struct rte_port *port;
struct ether_addr mac_addr;
if (port_id_is_invalid(pid, ENABLED_WARN))
FOREACH_PORT(pi, ports) {
if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
need_check_link_status = 0;
if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
RTE_PORT_HANDLING) == 0) {
printf("Port %d is now not stopped\n", pi);
if (port->need_reconfig > 0) {
port->need_reconfig = 0;
printf("Configuring Port %d (socket %u)\n", pi,
/* configure port */
diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
printf("Port %d can not be set back "
"to stopped\n", pi);
printf("Fail to configure port %d\n", pi);
/* try to reconfigure port next time */
port->need_reconfig = 1;
if (port->need_reconfig_queues > 0) {
port->need_reconfig_queues = 0;
/* setup tx queues */
for (qi = 0; qi < nb_txq; qi++) {
if ((numa_support) &&
(txring_numa[pi] != NUMA_NO_CONFIG))
diag = rte_eth_tx_queue_setup(pi, qi,
nb_txd, txring_numa[pi],
diag = rte_eth_tx_queue_setup(pi, qi,
nb_txd, port->socket_id,
/* Fail to setup tx queue, return */
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_STOPPED) == 0)
printf("Port %d can not be set back "
"to stopped\n", pi);
printf("Fail to configure port %d tx queues\n", pi);
/* try to reconfigure queues next time */
port->need_reconfig_queues = 1;
/* setup rx queues */
for (qi = 0; qi < nb_rxq; qi++) {
if ((numa_support) &&
(rxring_numa[pi] != NUMA_NO_CONFIG)) {
struct rte_mempool * mp =
mbuf_pool_find(rxring_numa[pi]);
printf("Failed to setup RX queue:"
"No mempool allocation"
" on the socket %d\n",
diag = rte_eth_rx_queue_setup(pi, qi,
nb_rxd, rxring_numa[pi],
&(port->rx_conf), mp);
struct rte_mempool *mp =
mbuf_pool_find(port->socket_id);
printf("Failed to setup RX queue:"
"No mempool allocation"
" on the socket %d\n",
diag = rte_eth_rx_queue_setup(pi, qi,
nb_rxd, port->socket_id,
&(port->rx_conf), mp);
/* Fail to setup rx queue, return */
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_STOPPED) == 0)
printf("Port %d can not be set back "
"to stopped\n", pi);
printf("Fail to configure port %d rx queues\n", pi);
/* try to reconfigure queues next time */
port->need_reconfig_queues = 1;
if (rte_eth_dev_start(pi) < 0) {
printf("Fail to start port %d\n", pi);
/* Fail to start port, return */
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
printf("Port %d can not be set back to "
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
printf("Port %d can not be set into started\n", pi);
rte_eth_macaddr_get(pi, &mac_addr);
printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
/* at least one port started, need checking link status */
need_check_link_status = 1;
if (need_check_link_status == 1 && !no_link_check)
check_all_ports_link_status(RTE_PORT_ALL);
else if (need_check_link_status == 0)
printf("Please stop the ports first\n");
stop_port(portid_t pid)
struct rte_port *port;
int need_check_link_status = 0;
if (port_id_is_invalid(pid, ENABLED_WARN))
printf("Stopping ports...\n");
FOREACH_PORT(pi, ports) {
if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
if (port_is_forwarding(pi) != 0 && test_done == 0) {
printf("Please remove port %d from forwarding configuration.\n", pi);
if (port_is_bonding_slave(pi)) {
printf("Please remove port %d from bonded device.\n", pi);
if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
RTE_PORT_HANDLING) == 0)
rte_eth_dev_stop(pi);
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
printf("Port %d can not be set into stopped\n", pi);
need_check_link_status = 1;
if (need_check_link_status && !no_link_check)
check_all_ports_link_status(RTE_PORT_ALL);
close_port(portid_t pid)
struct rte_port *port;
if (port_id_is_invalid(pid, ENABLED_WARN))
printf("Closing ports...\n");
FOREACH_PORT(pi, ports) {
if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
if (port_is_forwarding(pi) != 0 && test_done == 0) {
printf("Please remove port %d from forwarding configuration.\n", pi);
if (port_is_bonding_slave(pi)) {
printf("Please remove port %d from bonded device.\n", pi);
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
printf("Port %d is already closed\n", pi);
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
printf("Port %d is now not stopped\n", pi);
if (port->flow_list)
port_flow_flush(pi);
rte_eth_dev_close(pi);
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
printf("Port %d cannot be set to closed\n", pi);
attach_port(char *identifier)
unsigned int socket_id;
printf("Attaching a new port...\n");
if (identifier == NULL) {
printf("Invalid parameters are specified\n");
if (rte_eth_dev_attach(identifier, &pi))
ports[pi].enabled = 1;
socket_id = (unsigned)rte_eth_dev_socket_id(pi);
/* if socket_id is invalid, set to 0 */
if (check_socket_id(socket_id) < 0)
reconfig(pi, socket_id);
rte_eth_promiscuous_enable(pi);
nb_ports = rte_eth_dev_count();
ports[pi].port_status = RTE_PORT_STOPPED;
printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
detach_port(uint8_t port_id)
char name[RTE_ETH_NAME_MAX_LEN];
printf("Detaching a port...\n");
if (!port_is_closed(port_id)) {
printf("Please close port first\n");
if (ports[port_id].flow_list)
port_flow_flush(port_id);
if (rte_eth_dev_detach(port_id, name))
ports[port_id].enabled = 0;
nb_ports = rte_eth_dev_count();
printf("Port '%s' is detached. Now total ports is %d\n",
stop_packet_forwarding();
if (ports != NULL) {
FOREACH_PORT(pt_id, ports) {
printf("\nShutting down port %d...\n", pt_id);
printf("\nBye...\n");
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
const char *cmd_name;
cmd_func_t cmd_func;
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports for up to 9 s, and print it at the end */
check_all_ports_link_status(uint32_t port_mask)
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
uint8_t portid, count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("Checking link statuses...\n");
for (count = 0; count <= MAX_CHECK_TIME; count++) {
FOREACH_PORT(portid, ports) {
if ((port_mask & (1 << portid)) == 0)
memset(&link, 0, sizeof(link));
rte_eth_link_get_nowait(portid, &link);
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
printf("Port %d Link Up - speed %u "
"Mbps - %s\n", (uint8_t)portid,
(unsigned)link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex"));
printf("Port %d Link Down\n",
/* clear all_ports_up flag if any link down */
if (link.link_status == ETH_LINK_DOWN) {
/* after finally printing all link status, get out */
if (print_flag == 1)
if (all_ports_up == 0) {
rte_delay_ms(CHECK_INTERVAL);
/* set the print_flag if all ports up or timeout */
if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
uint8_t mapping_found = 0;
for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
if ((tx_queue_stats_mappings[i].port_id == port_id) &&
(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
tx_queue_stats_mappings[i].queue_id,
tx_queue_stats_mappings[i].stats_counter_id);
port->tx_queue_stats_mapping_enabled = 1;
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
uint8_t mapping_found = 0;
for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
if ((rx_queue_stats_mappings[i].port_id == port_id) &&
(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
rx_queue_stats_mappings[i].queue_id,
rx_queue_stats_mappings[i].stats_counter_id);
port->rx_queue_stats_mapping_enabled = 1;
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
diag = set_tx_queue_stats_mapping_registers(pi, port);
if (diag == -ENOTSUP) {
port->tx_queue_stats_mapping_enabled = 0;
printf("TX queue stats mapping not supported port id=%d\n", pi);
rte_exit(EXIT_FAILURE,
"set_tx_queue_stats_mapping_registers "
"failed for port id=%d diag=%d\n",
diag = set_rx_queue_stats_mapping_registers(pi, port);
if (diag == -ENOTSUP) {
port->rx_queue_stats_mapping_enabled = 0;
printf("RX queue stats mapping not supported port id=%d\n", pi);
rte_exit(EXIT_FAILURE,
"set_rx_queue_stats_mapping_registers "
"failed for port id=%d diag=%d\n",
rxtx_port_config(struct rte_port *port)
port->rx_conf = port->dev_info.default_rxconf;
port->tx_conf = port->dev_info.default_txconf;
/* Check if any RX/TX parameters have been passed */
if (rx_pthresh != RTE_PMD_PARAM_UNSET)
port->rx_conf.rx_thresh.pthresh = rx_pthresh;
if (rx_hthresh != RTE_PMD_PARAM_UNSET)
port->rx_conf.rx_thresh.hthresh = rx_hthresh;
if (rx_wthresh != RTE_PMD_PARAM_UNSET)
port->rx_conf.rx_thresh.wthresh = rx_wthresh;
if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
port->rx_conf.rx_free_thresh = rx_free_thresh;
if (rx_drop_en != RTE_PMD_PARAM_UNSET)
port->rx_conf.rx_drop_en = rx_drop_en;
if (tx_pthresh != RTE_PMD_PARAM_UNSET)
port->tx_conf.tx_thresh.pthresh = tx_pthresh;
if (tx_hthresh != RTE_PMD_PARAM_UNSET)
port->tx_conf.tx_thresh.hthresh = tx_hthresh;
if (tx_wthresh != RTE_PMD_PARAM_UNSET)
port->tx_conf.tx_thresh.wthresh = tx_wthresh;
if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
port->tx_conf.tx_rs_thresh = tx_rs_thresh;
if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
port->tx_conf.tx_free_thresh = tx_free_thresh;
if (txq_flags != RTE_PMD_PARAM_UNSET)
port->tx_conf.txq_flags = txq_flags;
init_port_config(void)
struct rte_port *port;
FOREACH_PORT(pid, ports) {
port->dev_conf.rxmode = rx_mode;
port->dev_conf.fdir_conf = fdir_conf;
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
if (port->dcb_flag == 0) {
if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
rxtx_port_config(port);
rte_eth_macaddr_get(pid, &port->eth_addr);
map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
rte_eth_dev_bypass_init(pid);
void set_port_slave_flag(portid_t slave_pid)
struct rte_port *port;
port = &ports[slave_pid];
port->slave_flag = 1;
void clear_port_slave_flag(portid_t slave_pid)
struct rte_port *port;
port = &ports[slave_pid];
port->slave_flag = 0;
uint8_t port_is_bonding_slave(portid_t slave_pid)
struct rte_port *port;
port = &ports[slave_pid];
return port->slave_flag;
const uint16_t vlan_tags[] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31
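/*
 * Note (editorial): in DCB+VT mode get_eth_dcb_conf() below maps these 32
 * VLAN tags round-robin onto the VMDQ receive pools: pool_map[i].vlan_id =
 * vlan_tags[i] and pool_map[i].pools = 1 << (i % nb_queue_pools), so with
 * 16 pools VLAN 0 and VLAN 16 land in pool 0, VLAN 1 and 17 in pool 1, etc.
 */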
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
enum dcb_mode_enable dcb_mode,
enum rte_eth_nb_tcs num_tcs,
 * Builds up the correct configuration for dcb+vt based on the vlan tags array
 * given above, and the number of traffic classes available for use.
if (dcb_mode == DCB_VT_ENABLED) {
struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&eth_conf->rx_adv_conf.vmdq_dcb_conf;
struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
/* VMDQ+DCB RX and TX configurations */
vmdq_rx_conf->enable_default_pool = 0;
vmdq_rx_conf->default_pool = 0;
vmdq_rx_conf->nb_queue_pools =
(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
vmdq_tx_conf->nb_queue_pools =
(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
vmdq_rx_conf->pool_map[i].pools =
1 << (i % vmdq_rx_conf->nb_queue_pools);
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
vmdq_rx_conf->dcb_tc[i] = i;
vmdq_tx_conf->dcb_tc[i] = i;
/* set DCB mode of RX and TX of multiple queues */
eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
struct rte_eth_dcb_rx_conf *rx_conf =
&eth_conf->rx_adv_conf.dcb_rx_conf;
struct rte_eth_dcb_tx_conf *tx_conf =
&eth_conf->tx_adv_conf.dcb_tx_conf;
rx_conf->nb_tcs = num_tcs;
tx_conf->nb_tcs = num_tcs;
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
rx_conf->dcb_tc[i] = i % num_tcs;
tx_conf->dcb_tc[i] = i % num_tcs;
eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
eth_conf->dcb_capability_en =
ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
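/*
 * Note (editorial): the two assignments above are the branches of an
 * (elided) check on pfc_en: priority flow control support is advertised in
 * dcb_capability_en only when it was requested, otherwise only the priority
 * group capability is set.
 */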
init_port_dcb_config(portid_t pid,
enum dcb_mode_enable dcb_mode,
enum rte_eth_nb_tcs num_tcs,
struct rte_eth_conf port_conf;
struct rte_port *rte_port;
rte_port = &ports[pid];
memset(&port_conf, 0, sizeof(struct rte_eth_conf));
/* Enter DCB configuration status */
/* set configuration of DCB in VT mode and DCB in non-VT mode */
retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
port_conf.rxmode.hw_vlan_filter = 1;
 * Write the configuration into the device.
 * Set the numbers of RX & TX queues to 0, so
 * the RX & TX queues will not be setup.
(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
rte_eth_dev_info_get(pid, &rte_port->dev_info);
/* If dev_info.vmdq_pool_base is greater than 0,
 * the queue id of vmdq pools is started after pf queues.
if (dcb_mode == DCB_VT_ENABLED &&
rte_port->dev_info.vmdq_pool_base > 0) {
printf("VMDQ_DCB multi-queue mode is nonsensical"
" for port %d.", pid);
/* Assume the ports in testpmd have the same dcb capability
 * and the same number of rxq and txq in dcb mode
if (dcb_mode == DCB_VT_ENABLED) {
if (rte_port->dev_info.max_vfs > 0) {
nb_rxq = rte_port->dev_info.nb_rx_queues;
nb_txq = rte_port->dev_info.nb_tx_queues;
nb_rxq = rte_port->dev_info.max_rx_queues;
nb_txq = rte_port->dev_info.max_tx_queues;
/* if VT is disabled, use all PF queues */
if (rte_port->dev_info.vmdq_pool_base == 0) {
nb_rxq = rte_port->dev_info.max_rx_queues;
nb_txq = rte_port->dev_info.max_tx_queues;
nb_rxq = (queueid_t)num_tcs;
nb_txq = (queueid_t)num_tcs;
rx_free_thresh = 64;
memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
rxtx_port_config(rte_port);
rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
for (i = 0; i < RTE_DIM(vlan_tags); i++)
rx_vft_set(pid, vlan_tags[i], 1);
rte_eth_macaddr_get(pid, &rte_port->eth_addr);
map_port_queue_stats_mapping_registers(pid, rte_port);
rte_port->dcb_flag = 1;
/* Configuration of Ethernet ports. */
ports = rte_zmalloc("testpmd: ports",
sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
RTE_CACHE_LINE_SIZE);
if (ports == NULL) {
rte_exit(EXIT_FAILURE,
"rte_zmalloc(%d struct rte_port) failed\n",
/* enable allocated ports */
for (pid = 0; pid < nb_ports; pid++)
ports[pid].enabled = 1;
signal_handler(int signum)
if (signum == SIGINT || signum == SIGTERM) {
printf("\nSignal %d received, preparing to exit...\n",
#ifdef RTE_LIBRTE_PDUMP
/* uninitialize packet capture framework */
#ifdef RTE_LIBRTE_LATENCY_STATS
rte_latencystats_uninit();
/* exit with the expected status */
signal(signum, SIG_DFL);
kill(getpid(), signum);
main(int argc, char** argv)
signal(SIGINT, signal_handler);
signal(SIGTERM, signal_handler);
diag = rte_eal_init(argc, argv);
rte_panic("Cannot init EAL\n");
#ifdef RTE_LIBRTE_PDUMP
/* initialize packet capture framework */
rte_pdump_init(NULL);
nb_ports = (portid_t) rte_eth_dev_count();
RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
/* allocate port structures, and init them */
set_def_fwd_config();
rte_panic("Empty set of forwarding logical cores - check the "
"core mask supplied in the command parameters\n");
launch_args_parse(argc, argv);
if (!nb_rxq && !nb_txq)
printf("Warning: Either rx or tx queues should be non-zero\n");
if (nb_rxq > 1 && nb_rxq > nb_txq)
printf("Warning: nb_rxq=%d enables RSS configuration, "
"but nb_txq=%d will prevent it from being fully tested.\n",
if (start_port(RTE_PORT_ALL) != 0)
rte_exit(EXIT_FAILURE, "Start ports failed\n");
/* set all ports to promiscuous mode by default */
FOREACH_PORT(port_id, ports)
rte_eth_promiscuous_enable(port_id);
/* Init metrics library */
rte_metrics_init(rte_socket_id());
#ifdef RTE_LIBRTE_LATENCY_STATS
if (latencystats_enabled != 0) {
int ret = rte_latencystats_init(1, NULL);
printf("Warning: latencystats init()"
" returned error %d\n", ret);
printf("Latencystats running on lcore %d\n",
latencystats_lcore_id);
/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
bitrate_data = rte_stats_bitrate_create();
if (bitrate_data == NULL)
rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
rte_stats_bitrate_reg(bitrate_data);
#ifdef RTE_LIBRTE_CMDLINE
if (interactive == 1) {
printf("Start automatic packet forwarding\n");
start_packet_forwarding(0);
printf("No commandline core given, start packet forwarding\n");
start_packet_forwarding(0);
printf("Press enter to exit\n");
rc = read(0, &c, 1);
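/*
 * Note (editorial): in non-interactive mode forwarding is started
 * immediately with the configured engine and the process then blocks on a
 * read from stdin, so pressing enter is what eventually triggers shutdown.
 */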