4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
82 uint16_t verbose_level = 0; /**< Silent by default. */
84 /* use master core for command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
89 * NUMA support configuration.
90 * When set, the NUMA support attempts to dispatch the allocation of the
91 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92 * probed ports among the CPU sockets 0 and 1.
93 * Otherwise, all memory is allocated from CPU socket 0.
95 uint8_t numa_support = 0; /**< No numa support by default */
98 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
101 uint8_t socket_num = UMA_NO_CONFIG;
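/*
 * Example (assuming the usual EAL and testpmd option names): NUMA-aware
 * allocation versus single-socket (UMA) allocation is typically chosen on
 * the command line, e.g.
 *
 *   testpmd -c 0xf -n 4 -- --numa              (allocate per CPU socket)
 *   testpmd -c 0xf -n 4 -- --socket-num=1      (UMA, take memory from socket 1)
 */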
104 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
109 * Record the Ethernet address of peer target ports to which packets are
111 * Must be instantiated with the Ethernet addresses of peer traffic generator
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
118 * Probed Target Environment.
120 struct rte_port *ports; /**< For all probed ethernet ports. */
121 portid_t nb_ports; /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
126 * Test Forwarding Configuration.
127 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t nb_cfg_ports; /**< Number of configured ports. */
133 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
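/*
 * Worked example of the constraints above: with nb_lcores = 4 enabled
 * forwarding lcores and nb_ports = 2 probed ports, a user may restrict the
 * configured sets to nb_cfg_lcores = 2 and nb_cfg_ports = 2; an individual
 * forwarding run then uses nb_fwd_lcores <= 2 of those cores and
 * nb_fwd_ports <= 2 of those ports.
 */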
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
142 * Forwarding engines.
144 struct fwd_engine * fwd_engines[] = {
147 &mac_retry_fwd_engine,
154 #ifdef RTE_LIBRTE_IEEE1588
155 &ieee1588_fwd_engine,
160 struct fwd_config cur_fwd_config;
161 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
164 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
165 * specified on command-line. */
168 * Configuration of packet segments used by the "txonly" processing engine.
170 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
171 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
172 TXONLY_DEF_PACKET_LEN,
174 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
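/*
 * Example: by default a TXONLY packet is a single segment of
 * TXONLY_DEF_PACKET_LEN bytes.  Assuming the usual "set txpkts" runtime
 * command, "set txpkts 64,64" would build each packet from two 64-byte
 * segments (tx_pkt_nb_segs = 2, total tx_pkt_length = 128).
 */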
176 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
177 /**< Split policy for packets to TX. */
179 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
180 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
182 /* whether the current configuration is in DCB mode; 0 means it is not */
183 uint8_t dcb_config = 0;
185 /* Whether DCB is currently under test */
186 uint8_t dcb_test = 0;
189 * Configurable number of RX/TX queues.
191 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
192 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
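/*
 * Example: assuming the usual testpmd options, the number of queues per port
 * can be set at start-up with "--rxq=N --txq=N", or changed at runtime with
 * "port config all rxq N" / "port config all txq N" while the ports are
 * stopped.
 */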
195 * Configurable number of RX/TX ring descriptors.
197 #define RTE_TEST_RX_DESC_DEFAULT 128
198 #define RTE_TEST_TX_DESC_DEFAULT 512
199 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
200 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
202 #define RTE_PMD_PARAM_UNSET -1
204 * Configurable values of RX and TX ring threshold registers.
207 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
209 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
216 * Configurable value of RX free threshold.
218 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
221 * Configurable value of RX drop enable.
223 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
226 * Configurable value of TX free threshold.
228 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
231 * Configurable value of TX RS bit threshold.
233 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
236 * Configurable value of TX queue flags.
238 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
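/*
 * All of the RTE_PMD_PARAM_UNSET values above mean "keep the PMD default
 * reported in dev_info.default_rxconf / default_txconf"; see
 * rxtx_port_config() below.  Assuming the usual testpmd option names, they
 * can be overridden at start-up, e.g.:
 *
 *   testpmd ... -- --rxpt=8 --rxht=8 --rxwt=4 --txfreet=32 --txrst=32
 */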
241 * Receive Side Scaling (RSS) configuration.
243 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
246 * Port topology configuration
248 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
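/*
 * Example: with the default "paired" topology, traffic received on port 0 is
 * forwarded to port 1 and vice versa (2<->3, 4<->5, ...).  Assuming the usual
 * --port-topology option values, "chained" instead forwards each port to the
 * next one in a ring (0->1->...->0), and "loop" sends packets back out on the
 * port they were received on.
 */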
251 * Avoid flushing all the RX streams before starting forwarding.
253 uint8_t no_flush_rx = 0; /* flush by default */
256 * Avoid checking the link status when starting/stopping a port.
258 uint8_t no_link_check = 0; /* check by default */
261 * NIC bypass mode configuration options.
263 #ifdef RTE_NIC_BYPASS
265 /* The NIC bypass watchdog timeout. */
266 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
271 * Ethernet device configuration.
273 struct rte_eth_rxmode rx_mode = {
274 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
276 .header_split = 0, /**< Header Split disabled. */
277 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
278 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
279 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
280 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
281 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
282 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
285 struct rte_fdir_conf fdir_conf = {
286 .mode = RTE_FDIR_MODE_NONE,
287 .pballoc = RTE_FDIR_PBALLOC_64K,
288 .status = RTE_FDIR_REPORT_STATUS,
290 .vlan_tci_mask = 0x0,
292 .src_ip = 0xFFFFFFFF,
293 .dst_ip = 0xFFFFFFFF,
296 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
299 .src_port_mask = 0xFFFF,
300 .dst_port_mask = 0xFFFF,
301 .mac_addr_byte_mask = 0xFF,
302 .tunnel_type_mask = 1,
303 .tunnel_id_mask = 0xFFFFFFFF,
308 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
310 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
311 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
313 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
314 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
316 uint16_t nb_tx_queue_stats_mappings = 0;
317 uint16_t nb_rx_queue_stats_mappings = 0;
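/*
 * Example: assuming the usual testpmd option syntax, a mapping entry binds a
 * (port, queue) pair to one of the per-port stats registers, e.g.
 * "--rx-queue-stats-mapping=(0,0,0),(0,1,1)" maps queues 0 and 1 of port 0 to
 * stats counters 0 and 1.  The registers themselves are programmed in
 * map_port_queue_stats_mapping_registers() below.
 */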
319 unsigned max_socket = 0;
321 /* Forward function declarations */
322 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
323 static void check_all_ports_link_status(uint32_t port_mask);
326 * Check if all the ports are started.
327 * If yes, return positive value. If not, return zero.
329 static int all_ports_started(void);
332 * Find next enabled port
335 find_next_port(portid_t p, struct rte_port *ports, int size)
338 rte_exit(-EINVAL, "failed to find a next port id\n");
340 while ((p < size) && (ports[p].enabled == 0))
346 * Setup default configuration.
349 set_default_fwd_lcores_config(void)
353 unsigned int sock_num;
356 for (i = 0; i < RTE_MAX_LCORE; i++) {
357 sock_num = rte_lcore_to_socket_id(i) + 1;
358 if (sock_num > max_socket) {
359 if (sock_num > RTE_MAX_NUMA_NODES)
360 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
361 max_socket = sock_num;
363 if (!rte_lcore_is_enabled(i))
365 if (i == rte_get_master_lcore())
367 fwd_lcores_cpuids[nb_lc++] = i;
369 nb_lcores = (lcoreid_t) nb_lc;
370 nb_cfg_lcores = nb_lcores;
375 set_def_peer_eth_addrs(void)
379 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
381 peer_eth_addrs[i].addr_bytes[5] = i;
386 set_default_fwd_ports_config(void)
390 for (pt_id = 0; pt_id < nb_ports; pt_id++)
391 fwd_ports_ids[pt_id] = pt_id;
393 nb_cfg_ports = nb_ports;
394 nb_fwd_ports = nb_ports;
398 set_def_fwd_config(void)
400 set_default_fwd_lcores_config();
401 set_def_peer_eth_addrs();
402 set_default_fwd_ports_config();
406 * Configuration initialisation done once at init time.
409 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
410 unsigned int socket_id)
412 char pool_name[RTE_MEMPOOL_NAMESIZE];
413 struct rte_mempool *rte_mp = NULL;
416 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
417 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
420 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
421 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
423 #ifdef RTE_LIBRTE_PMD_XENVIRT
424 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
425 (unsigned) mb_mempool_cache,
426 sizeof(struct rte_pktmbuf_pool_private),
427 rte_pktmbuf_pool_init, NULL,
428 rte_pktmbuf_init, NULL,
432 /* if the former XEN allocation failed, fall back to normal allocation */
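/*
 * Sketch of the fallback order used below: when built with
 * RTE_LIBRTE_PMD_XENVIRT the pool is first requested from the Xen grant
 * allocator; if that is unavailable or fails, the pool is created either from
 * anonymous mapped memory (rte_mempool_create_empty() +
 * rte_mempool_populate_anon()) or through the plain rte_pktmbuf_pool_create()
 * wrapper, depending on the configured allocation mode.
 */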
433 if (rte_mp == NULL) {
435 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
436 mb_size, (unsigned) mb_mempool_cache,
437 sizeof(struct rte_pktmbuf_pool_private),
440 if (rte_mempool_populate_anon(rte_mp) == 0) {
441 rte_mempool_free(rte_mp);
444 rte_pktmbuf_pool_init(rte_mp, NULL);
445 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
447 /* wrapper to rte_mempool_create() */
448 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
449 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
453 if (rte_mp == NULL) {
454 rte_exit(EXIT_FAILURE,
455 "Creation of mbuf pool for socket %u failed: %s\n",
456 socket_id, rte_strerror(rte_errno));
457 } else if (verbose_level > 0) {
458 rte_mempool_dump(stdout, rte_mp);
463 * Check whether the given socket id is valid in NUMA mode.
464 * Return 0 if valid, -1 otherwise.
467 check_socket_id(const unsigned int socket_id)
469 static int warning_once = 0;
471 if (socket_id >= max_socket) {
472 if (!warning_once && numa_support)
473 printf("Warning: NUMA should be configured manually by"
474 " using --port-numa-config and"
475 " --ring-numa-config parameters along with"
487 struct rte_port *port;
488 struct rte_mempool *mbp;
489 unsigned int nb_mbuf_per_pool;
491 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
493 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
494 /* Configuration of logical cores. */
495 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
496 sizeof(struct fwd_lcore *) * nb_lcores,
497 RTE_CACHE_LINE_SIZE);
498 if (fwd_lcores == NULL) {
499 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
500 "failed\n", nb_lcores);
502 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
503 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
504 sizeof(struct fwd_lcore),
505 RTE_CACHE_LINE_SIZE);
506 if (fwd_lcores[lc_id] == NULL) {
507 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
510 fwd_lcores[lc_id]->cpuid_idx = lc_id;
514 * Create mbuf pools.
515 * If NUMA support is disabled, create a single mbuf pool in
516 * socket 0 memory by default.
517 * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
519 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
520 * nb_txd can be configured at run time.
522 if (param_total_num_mbufs)
523 nb_mbuf_per_pool = param_total_num_mbufs;
525 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
526 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
530 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
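/*
 * Worked example of the sizing above, using values assumed here for
 * illustration (the testpmd.h defaults): with RTE_TEST_RX_DESC_MAX = 2048,
 * RTE_TEST_TX_DESC_MAX = 2048, MAX_PKT_BURST = 512, mb_mempool_cache = 250
 * and 4 forwarding lcores, the per-port share is
 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, which is then multiplied by
 * RTE_MAX_ETHPORTS when a single pool has to serve all possible ports.
 */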
534 if (socket_num == UMA_NO_CONFIG)
535 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
537 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
541 FOREACH_PORT(pid, ports) {
543 rte_eth_dev_info_get(pid, &port->dev_info);
546 if (port_numa[pid] != NUMA_NO_CONFIG)
547 port_per_socket[port_numa[pid]]++;
549 uint32_t socket_id = rte_eth_dev_socket_id(pid);
551 /* if socket_id is invalid, set to 0 */
552 if (check_socket_id(socket_id) < 0)
554 port_per_socket[socket_id]++;
558 /* set flag to initialize port/queue */
559 port->need_reconfig = 1;
560 port->need_reconfig_queues = 1;
565 unsigned int nb_mbuf;
567 if (param_total_num_mbufs)
568 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
570 for (i = 0; i < max_socket; i++) {
571 nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
573 mbuf_pool_create(mbuf_data_size,
580 * Record which mbuf pool each logical core should use, if needed.
582 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
583 mbp = mbuf_pool_find(
584 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
587 mbp = mbuf_pool_find(0);
588 fwd_lcores[lc_id]->mbp = mbp;
591 /* Configuration of packet forwarding streams. */
592 if (init_fwd_streams() < 0)
593 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
600 reconfig(portid_t new_port_id, unsigned socket_id)
602 struct rte_port *port;
604 /* Reconfiguration of Ethernet ports. */
605 port = &ports[new_port_id];
606 rte_eth_dev_info_get(new_port_id, &port->dev_info);
608 /* set flag to initialize port/queue */
609 port->need_reconfig = 1;
610 port->need_reconfig_queues = 1;
611 port->socket_id = socket_id;
618 init_fwd_streams(void)
621 struct rte_port *port;
622 streamid_t sm_id, nb_fwd_streams_new;
625 /* set socket id according to NUMA mode or not */
626 FOREACH_PORT(pid, ports) {
628 if (nb_rxq > port->dev_info.max_rx_queues) {
629 printf("Fail: nb_rxq(%d) is greater than "
630 "max_rx_queues(%d)\n", nb_rxq,
631 port->dev_info.max_rx_queues);
634 if (nb_txq > port->dev_info.max_tx_queues) {
635 printf("Fail: nb_txq(%d) is greater than "
636 "max_tx_queues(%d)\n", nb_txq,
637 port->dev_info.max_tx_queues);
641 if (port_numa[pid] != NUMA_NO_CONFIG)
642 port->socket_id = port_numa[pid];
644 port->socket_id = rte_eth_dev_socket_id(pid);
646 /* if socket_id is invalid, set to 0 */
647 if (check_socket_id(port->socket_id) < 0)
652 if (socket_num == UMA_NO_CONFIG)
655 port->socket_id = socket_num;
659 q = RTE_MAX(nb_rxq, nb_txq);
661 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
664 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
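/*
 * Example: with 2 probed ports, nb_rxq = 4 and nb_txq = 2, q = 4 and
 * nb_fwd_streams_new = 2 * 4 = 8 forwarding streams are allocated.
 */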
665 if (nb_fwd_streams_new == nb_fwd_streams)
668 if (fwd_streams != NULL) {
669 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
670 if (fwd_streams[sm_id] == NULL)
672 rte_free(fwd_streams[sm_id]);
673 fwd_streams[sm_id] = NULL;
675 rte_free(fwd_streams);
680 nb_fwd_streams = nb_fwd_streams_new;
681 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
682 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
683 if (fwd_streams == NULL)
684 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
685 "failed\n", nb_fwd_streams);
687 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
688 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
689 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
690 if (fwd_streams[sm_id] == NULL)
691 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
698 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
700 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
702 unsigned int total_burst;
703 unsigned int nb_burst;
704 unsigned int burst_stats[3];
705 uint16_t pktnb_stats[3];
707 int burst_percent[3];
710 * First compute the total number of packet bursts and the
711 * two highest numbers of bursts of the same number of packets.
714 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
715 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
716 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
717 nb_burst = pbs->pkt_burst_spread[nb_pkt];
720 total_burst += nb_burst;
721 if (nb_burst > burst_stats[0]) {
722 burst_stats[1] = burst_stats[0];
723 pktnb_stats[1] = pktnb_stats[0];
724 burst_stats[0] = nb_burst;
725 pktnb_stats[0] = nb_pkt;
728 if (total_burst == 0)
730 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
731 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
732 burst_percent[0], (int) pktnb_stats[0]);
733 if (burst_stats[0] == total_burst) {
737 if (burst_stats[0] + burst_stats[1] == total_burst) {
738 printf(" + %d%% of %d pkts]\n",
739 100 - burst_percent[0], pktnb_stats[1]);
742 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
743 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
744 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
745 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
748 printf(" + %d%% of %d pkts + %d%% of others]\n",
749 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
751 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
754 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
756 struct rte_port *port;
759 static const char *fwd_stats_border = "----------------------";
761 port = &ports[port_id];
762 printf("\n %s Forward statistics for port %-2d %s\n",
763 fwd_stats_border, port_id, fwd_stats_border);
765 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
766 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
768 stats->ipackets, stats->imissed,
769 (uint64_t) (stats->ipackets + stats->imissed));
771 if (cur_fwd_eng == &csum_fwd_engine)
772 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
773 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
774 if ((stats->ierrors + stats->rx_nombuf) > 0) {
775 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
776 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
779 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
781 stats->opackets, port->tx_dropped,
782 (uint64_t) (stats->opackets + port->tx_dropped));
785 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
787 stats->ipackets, stats->imissed,
788 (uint64_t) (stats->ipackets + stats->imissed));
790 if (cur_fwd_eng == &csum_fwd_engine)
791 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
792 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
793 if ((stats->ierrors + stats->rx_nombuf) > 0) {
794 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
795 printf(" RX-nombufs: %14"PRIu64"\n",
799 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
801 stats->opackets, port->tx_dropped,
802 (uint64_t) (stats->opackets + port->tx_dropped));
805 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
807 pkt_burst_stats_display("RX",
808 &port->rx_stream->rx_burst_stats);
810 pkt_burst_stats_display("TX",
811 &port->tx_stream->tx_burst_stats);
814 if (port->rx_queue_stats_mapping_enabled) {
816 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
817 printf(" Stats reg %2d RX-packets:%14"PRIu64
818 " RX-errors:%14"PRIu64
819 " RX-bytes:%14"PRIu64"\n",
820 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
824 if (port->tx_queue_stats_mapping_enabled) {
825 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
826 printf(" Stats reg %2d TX-packets:%14"PRIu64
827 " TX-bytes:%14"PRIu64"\n",
828 i, stats->q_opackets[i], stats->q_obytes[i]);
832 printf(" %s--------------------------------%s\n",
833 fwd_stats_border, fwd_stats_border);
837 fwd_stream_stats_display(streamid_t stream_id)
839 struct fwd_stream *fs;
840 static const char *fwd_top_stats_border = "-------";
842 fs = fwd_streams[stream_id];
843 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
844 (fs->fwd_dropped == 0))
846 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
847 "TX Port=%2d/Queue=%2d %s\n",
848 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
849 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
850 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
851 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
853 /* if checksum mode */
854 if (cur_fwd_eng == &csum_fwd_engine) {
855 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
856 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
859 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
860 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
861 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
866 flush_fwd_rx_queues(void)
868 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
876 for (j = 0; j < 2; j++) {
877 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
878 for (rxq = 0; rxq < nb_rxq; rxq++) {
879 port_id = fwd_ports_ids[rxp];
881 nb_rx = rte_eth_rx_burst(port_id, rxq,
882 pkts_burst, MAX_PKT_BURST);
883 for (i = 0; i < nb_rx; i++)
884 rte_pktmbuf_free(pkts_burst[i]);
888 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
893 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
895 struct fwd_stream **fsm;
899 fsm = &fwd_streams[fc->stream_idx];
900 nb_fs = fc->stream_nb;
902 for (sm_id = 0; sm_id < nb_fs; sm_id++)
903 (*pkt_fwd)(fsm[sm_id]);
904 } while (! fc->stopped);
908 start_pkt_forward_on_core(void *fwd_arg)
910 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
911 cur_fwd_config.fwd_eng->packet_fwd);
916 * Run the TXONLY packet forwarding engine to send a single burst of packets.
917 * Used to start communication flows in network loopback test configurations.
920 run_one_txonly_burst_on_core(void *fwd_arg)
922 struct fwd_lcore *fwd_lc;
923 struct fwd_lcore tmp_lcore;
925 fwd_lc = (struct fwd_lcore *) fwd_arg;
927 tmp_lcore.stopped = 1;
928 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
933 * Launch packet forwarding:
934 * - Setup per-port forwarding context.
935 * - launch logical cores with their forwarding configuration.
938 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
940 port_fwd_begin_t port_fwd_begin;
945 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
946 if (port_fwd_begin != NULL) {
947 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
948 (*port_fwd_begin)(fwd_ports_ids[i]);
950 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
951 lc_id = fwd_lcores_cpuids[i];
952 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
953 fwd_lcores[i]->stopped = 0;
954 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
955 fwd_lcores[i], lc_id);
957 printf("launch lcore %u failed - diag=%d\n",
964 * Launch packet forwarding configuration.
967 start_packet_forwarding(int with_tx_first)
969 port_fwd_begin_t port_fwd_begin;
970 port_fwd_end_t port_fwd_end;
971 struct rte_port *port;
976 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
977 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
979 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
980 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
982 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
983 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
984 (!nb_rxq || !nb_txq))
985 rte_exit(EXIT_FAILURE,
986 "Either rxq or txq are 0, cannot use %s fwd mode\n",
987 cur_fwd_eng->fwd_mode_name);
989 if (all_ports_started() == 0) {
990 printf("Not all ports were started\n");
993 if (test_done == 0) {
994 printf("Packet forwarding already started\n");
998 if (init_fwd_streams() < 0) {
999 printf("Fail from init_fwd_streams()\n");
1004 for (i = 0; i < nb_fwd_ports; i++) {
1005 pt_id = fwd_ports_ids[i];
1006 port = &ports[pt_id];
1007 if (!port->dcb_flag) {
1008 printf("In DCB mode, all forwarding ports must "
1009 "be configured in this mode.\n");
1013 if (nb_fwd_lcores == 1) {
1014 printf("In DCB mode,the nb forwarding cores "
1015 "should be larger than 1.\n");
1022 flush_fwd_rx_queues();
1025 rxtx_config_display();
1027 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1028 pt_id = fwd_ports_ids[i];
1029 port = &ports[pt_id];
1030 rte_eth_stats_get(pt_id, &port->stats);
1031 port->tx_dropped = 0;
1033 map_port_queue_stats_mapping_registers(pt_id, port);
1035 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1036 fwd_streams[sm_id]->rx_packets = 0;
1037 fwd_streams[sm_id]->tx_packets = 0;
1038 fwd_streams[sm_id]->fwd_dropped = 0;
1039 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1040 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1042 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1043 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1044 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1045 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1046 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1048 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1049 fwd_streams[sm_id]->core_cycles = 0;
1052 if (with_tx_first) {
1053 port_fwd_begin = tx_only_engine.port_fwd_begin;
1054 if (port_fwd_begin != NULL) {
1055 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1056 (*port_fwd_begin)(fwd_ports_ids[i]);
1058 launch_packet_forwarding(run_one_txonly_burst_on_core);
1059 rte_eal_mp_wait_lcore();
1060 port_fwd_end = tx_only_engine.port_fwd_end;
1061 if (port_fwd_end != NULL) {
1062 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1063 (*port_fwd_end)(fwd_ports_ids[i]);
1066 launch_packet_forwarding(start_pkt_forward_on_core);
1070 stop_packet_forwarding(void)
1072 struct rte_eth_stats stats;
1073 struct rte_port *port;
1074 port_fwd_end_t port_fwd_end;
1079 uint64_t total_recv;
1080 uint64_t total_xmit;
1081 uint64_t total_rx_dropped;
1082 uint64_t total_tx_dropped;
1083 uint64_t total_rx_nombuf;
1084 uint64_t tx_dropped;
1085 uint64_t rx_bad_ip_csum;
1086 uint64_t rx_bad_l4_csum;
1087 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1088 uint64_t fwd_cycles;
1090 static const char *acc_stats_border = "+++++++++++++++";
1093 printf("Packet forwarding not started\n");
1096 printf("Telling cores to stop...");
1097 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1098 fwd_lcores[lc_id]->stopped = 1;
1099 printf("\nWaiting for lcores to finish...\n");
1100 rte_eal_mp_wait_lcore();
1101 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1102 if (port_fwd_end != NULL) {
1103 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1104 pt_id = fwd_ports_ids[i];
1105 (*port_fwd_end)(pt_id);
1108 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1111 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1112 if (cur_fwd_config.nb_fwd_streams >
1113 cur_fwd_config.nb_fwd_ports) {
1114 fwd_stream_stats_display(sm_id);
1115 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1116 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1118 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1120 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1123 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1124 tx_dropped = (uint64_t) (tx_dropped +
1125 fwd_streams[sm_id]->fwd_dropped);
1126 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1129 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1130 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1131 fwd_streams[sm_id]->rx_bad_ip_csum);
1132 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1136 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1137 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1138 fwd_streams[sm_id]->rx_bad_l4_csum);
1139 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1142 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1143 fwd_cycles = (uint64_t) (fwd_cycles +
1144 fwd_streams[sm_id]->core_cycles);
1149 total_rx_dropped = 0;
1150 total_tx_dropped = 0;
1151 total_rx_nombuf = 0;
1152 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1153 pt_id = fwd_ports_ids[i];
1155 port = &ports[pt_id];
1156 rte_eth_stats_get(pt_id, &stats);
1157 stats.ipackets -= port->stats.ipackets;
1158 port->stats.ipackets = 0;
1159 stats.opackets -= port->stats.opackets;
1160 port->stats.opackets = 0;
1161 stats.ibytes -= port->stats.ibytes;
1162 port->stats.ibytes = 0;
1163 stats.obytes -= port->stats.obytes;
1164 port->stats.obytes = 0;
1165 stats.imissed -= port->stats.imissed;
1166 port->stats.imissed = 0;
1167 stats.oerrors -= port->stats.oerrors;
1168 port->stats.oerrors = 0;
1169 stats.rx_nombuf -= port->stats.rx_nombuf;
1170 port->stats.rx_nombuf = 0;
1172 total_recv += stats.ipackets;
1173 total_xmit += stats.opackets;
1174 total_rx_dropped += stats.imissed;
1175 total_tx_dropped += port->tx_dropped;
1176 total_rx_nombuf += stats.rx_nombuf;
1178 fwd_port_stats_display(pt_id, &stats);
1180 printf("\n %s Accumulated forward statistics for all ports"
1182 acc_stats_border, acc_stats_border);
1183 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1185 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1187 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1188 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1189 if (total_rx_nombuf > 0)
1190 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1191 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1193 acc_stats_border, acc_stats_border);
1194 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1196 printf("\n CPU cycles/packet=%u (total cycles="
1197 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1198 (unsigned int)(fwd_cycles / total_recv),
1199 fwd_cycles, total_recv);
1201 printf("\nDone.\n");
1206 dev_set_link_up(portid_t pid)
1208 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1209 printf("\nSet link up fail.\n");
1213 dev_set_link_down(portid_t pid)
1215 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1216 printf("\nSet link down fail.\n");
1220 all_ports_started(void)
1223 struct rte_port *port;
1225 FOREACH_PORT(pi, ports) {
1227 /* Check if there is a port which is not started */
1228 if ((port->port_status != RTE_PORT_STARTED) &&
1229 (port->slave_flag == 0))
1233 /* All ports are started */
1238 all_ports_stopped(void)
1241 struct rte_port *port;
1243 FOREACH_PORT(pi, ports) {
1245 if ((port->port_status != RTE_PORT_STOPPED) &&
1246 (port->slave_flag == 0))
1254 port_is_started(portid_t port_id)
1256 if (port_id_is_invalid(port_id, ENABLED_WARN))
1259 if (ports[port_id].port_status != RTE_PORT_STARTED)
1266 port_is_closed(portid_t port_id)
1268 if (port_id_is_invalid(port_id, ENABLED_WARN))
1271 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1278 start_port(portid_t pid)
1280 int diag, need_check_link_status = -1;
1283 struct rte_port *port;
1284 struct ether_addr mac_addr;
1286 if (port_id_is_invalid(pid, ENABLED_WARN))
1291 FOREACH_PORT(pi, ports) {
1292 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1295 need_check_link_status = 0;
1297 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1298 RTE_PORT_HANDLING) == 0) {
1299 printf("Port %d is now not stopped\n", pi);
1303 if (port->need_reconfig > 0) {
1304 port->need_reconfig = 0;
1306 printf("Configuring Port %d (socket %u)\n", pi,
1308 /* configure port */
1309 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1312 if (rte_atomic16_cmpset(&(port->port_status),
1313 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1314 printf("Port %d can not be set back "
1315 "to stopped\n", pi);
1316 printf("Fail to configure port %d\n", pi);
1317 /* try to reconfigure port next time */
1318 port->need_reconfig = 1;
1322 if (port->need_reconfig_queues > 0) {
1323 port->need_reconfig_queues = 0;
1324 /* setup tx queues */
1325 for (qi = 0; qi < nb_txq; qi++) {
1326 if ((numa_support) &&
1327 (txring_numa[pi] != NUMA_NO_CONFIG))
1328 diag = rte_eth_tx_queue_setup(pi, qi,
1329 nb_txd, txring_numa[pi],
1332 diag = rte_eth_tx_queue_setup(pi, qi,
1333 nb_txd, port->socket_id,
1339 /* Failed to set up tx queue, return */
1340 if (rte_atomic16_cmpset(&(port->port_status),
1342 RTE_PORT_STOPPED) == 0)
1343 printf("Port %d can not be set back "
1344 "to stopped\n", pi);
1345 printf("Fail to configure port %d tx queues\n", pi);
1346 /* try to reconfigure queues next time */
1347 port->need_reconfig_queues = 1;
1350 /* setup rx queues */
1351 for (qi = 0; qi < nb_rxq; qi++) {
1352 if ((numa_support) &&
1353 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1354 struct rte_mempool * mp =
1355 mbuf_pool_find(rxring_numa[pi]);
1357 printf("Failed to setup RX queue:"
1358 "No mempool allocation"
1359 " on the socket %d\n",
1364 diag = rte_eth_rx_queue_setup(pi, qi,
1365 nb_rxd, rxring_numa[pi],
1366 &(port->rx_conf), mp);
1368 struct rte_mempool *mp =
1369 mbuf_pool_find(port->socket_id);
1371 printf("Failed to setup RX queue:"
1372 "No mempool allocation"
1373 " on the socket %d\n",
1377 diag = rte_eth_rx_queue_setup(pi, qi,
1378 nb_rxd, port->socket_id,
1379 &(port->rx_conf), mp);
1384 /* Failed to set up rx queue, return */
1385 if (rte_atomic16_cmpset(&(port->port_status),
1387 RTE_PORT_STOPPED) == 0)
1388 printf("Port %d can not be set back "
1389 "to stopped\n", pi);
1390 printf("Fail to configure port %d rx queues\n", pi);
1391 /* try to reconfigure queues next time */
1392 port->need_reconfig_queues = 1;
1397 if (rte_eth_dev_start(pi) < 0) {
1398 printf("Fail to start port %d\n", pi);
1400 /* Failed to start port, set it back to stopped */
1401 if (rte_atomic16_cmpset(&(port->port_status),
1402 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1403 printf("Port %d can not be set back to "
1408 if (rte_atomic16_cmpset(&(port->port_status),
1409 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1410 printf("Port %d can not be set into started\n", pi);
1412 rte_eth_macaddr_get(pi, &mac_addr);
1413 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1414 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1415 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1416 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1418 /* at least one port started, need to check link status */
1419 need_check_link_status = 1;
1422 if (need_check_link_status == 1 && !no_link_check)
1423 check_all_ports_link_status(RTE_PORT_ALL);
1424 else if (need_check_link_status == 0)
1425 printf("Please stop the ports first\n");
1432 stop_port(portid_t pid)
1435 struct rte_port *port;
1436 int need_check_link_status = 0;
1443 if (port_id_is_invalid(pid, ENABLED_WARN))
1446 printf("Stopping ports...\n");
1448 FOREACH_PORT(pi, ports) {
1449 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1452 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1453 printf("Please remove port %d from forwarding configuration.\n", pi);
1457 if (port_is_bonding_slave(pi)) {
1458 printf("Please remove port %d from bonded device.\n", pi);
1463 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1464 RTE_PORT_HANDLING) == 0)
1467 rte_eth_dev_stop(pi);
1469 if (rte_atomic16_cmpset(&(port->port_status),
1470 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1471 printf("Port %d can not be set into stopped\n", pi);
1472 need_check_link_status = 1;
1474 if (need_check_link_status && !no_link_check)
1475 check_all_ports_link_status(RTE_PORT_ALL);
1481 close_port(portid_t pid)
1484 struct rte_port *port;
1486 if (port_id_is_invalid(pid, ENABLED_WARN))
1489 printf("Closing ports...\n");
1491 FOREACH_PORT(pi, ports) {
1492 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1495 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1496 printf("Please remove port %d from forwarding configuration.\n", pi);
1500 if (port_is_bonding_slave(pi)) {
1501 printf("Please remove port %d from bonded device.\n", pi);
1506 if (rte_atomic16_cmpset(&(port->port_status),
1507 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1508 printf("Port %d is already closed\n", pi);
1512 if (rte_atomic16_cmpset(&(port->port_status),
1513 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1514 printf("Port %d is now not stopped\n", pi);
1518 rte_eth_dev_close(pi);
1520 if (rte_atomic16_cmpset(&(port->port_status),
1521 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1522 printf("Port %d cannot be set to closed\n", pi);
1529 attach_port(char *identifier)
1532 unsigned int socket_id;
1534 printf("Attaching a new port...\n");
1536 if (identifier == NULL) {
1537 printf("Invalid parameters are specified\n");
1541 if (rte_eth_dev_attach(identifier, &pi))
1544 ports[pi].enabled = 1;
1545 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1546 /* if socket_id is invalid, set to 0 */
1547 if (check_socket_id(socket_id) < 0)
1549 reconfig(pi, socket_id);
1550 rte_eth_promiscuous_enable(pi);
1552 nb_ports = rte_eth_dev_count();
1554 ports[pi].port_status = RTE_PORT_STOPPED;
1556 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1561 detach_port(uint8_t port_id)
1563 char name[RTE_ETH_NAME_MAX_LEN];
1565 printf("Detaching a port...\n");
1567 if (!port_is_closed(port_id)) {
1568 printf("Please close port first\n");
1572 if (rte_eth_dev_detach(port_id, name))
1575 ports[port_id].enabled = 0;
1576 nb_ports = rte_eth_dev_count();
1578 printf("Port '%s' is detached. Now total ports is %d\n",
1590 stop_packet_forwarding();
1592 if (ports != NULL) {
1594 FOREACH_PORT(pt_id, ports) {
1595 printf("\nShutting down port %d...\n", pt_id);
1601 printf("\nBye...\n");
1604 typedef void (*cmd_func_t)(void);
1605 struct pmd_test_command {
1606 const char *cmd_name;
1607 cmd_func_t cmd_func;
1610 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1612 /* Check the link status of all ports in up to 9s, and print the final result */
1614 check_all_ports_link_status(uint32_t port_mask)
1616 #define CHECK_INTERVAL 100 /* 100ms */
1617 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1618 uint8_t portid, count, all_ports_up, print_flag = 0;
1619 struct rte_eth_link link;
1621 printf("Checking link statuses...\n");
1623 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1625 FOREACH_PORT(portid, ports) {
1626 if ((port_mask & (1 << portid)) == 0)
1628 memset(&link, 0, sizeof(link));
1629 rte_eth_link_get_nowait(portid, &link);
1630 /* print link status if flag set */
1631 if (print_flag == 1) {
1632 if (link.link_status)
1633 printf("Port %d Link Up - speed %u "
1634 "Mbps - %s\n", (uint8_t)portid,
1635 (unsigned)link.link_speed,
1636 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1637 ("full-duplex") : ("half-duplex\n"));
1639 printf("Port %d Link Down\n",
1643 /* clear all_ports_up flag if any link down */
1644 if (link.link_status == ETH_LINK_DOWN) {
1649 /* after finally printing all link status, get out */
1650 if (print_flag == 1)
1653 if (all_ports_up == 0) {
1655 rte_delay_ms(CHECK_INTERVAL);
1658 /* set the print_flag if all ports up or timeout */
1659 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1666 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1670 uint8_t mapping_found = 0;
1672 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1673 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1674 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1675 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1676 tx_queue_stats_mappings[i].queue_id,
1677 tx_queue_stats_mappings[i].stats_counter_id);
1684 port->tx_queue_stats_mapping_enabled = 1;
1689 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1693 uint8_t mapping_found = 0;
1695 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1696 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1697 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1698 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1699 rx_queue_stats_mappings[i].queue_id,
1700 rx_queue_stats_mappings[i].stats_counter_id);
1707 port->rx_queue_stats_mapping_enabled = 1;
1712 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1716 diag = set_tx_queue_stats_mapping_registers(pi, port);
1718 if (diag == -ENOTSUP) {
1719 port->tx_queue_stats_mapping_enabled = 0;
1720 printf("TX queue stats mapping not supported port id=%d\n", pi);
1723 rte_exit(EXIT_FAILURE,
1724 "set_tx_queue_stats_mapping_registers "
1725 "failed for port id=%d diag=%d\n",
1729 diag = set_rx_queue_stats_mapping_registers(pi, port);
1731 if (diag == -ENOTSUP) {
1732 port->rx_queue_stats_mapping_enabled = 0;
1733 printf("RX queue stats mapping not supported port id=%d\n", pi);
1736 rte_exit(EXIT_FAILURE,
1737 "set_rx_queue_stats_mapping_registers "
1738 "failed for port id=%d diag=%d\n",
1744 rxtx_port_config(struct rte_port *port)
1746 port->rx_conf = port->dev_info.default_rxconf;
1747 port->tx_conf = port->dev_info.default_txconf;
1749 /* Check if any RX/TX parameters have been passed */
1750 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1751 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1753 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1754 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1756 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1757 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1759 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1760 port->rx_conf.rx_free_thresh = rx_free_thresh;
1762 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1763 port->rx_conf.rx_drop_en = rx_drop_en;
1765 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1766 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1768 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1769 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1771 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1772 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1774 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1775 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1777 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1778 port->tx_conf.tx_free_thresh = tx_free_thresh;
1780 if (txq_flags != RTE_PMD_PARAM_UNSET)
1781 port->tx_conf.txq_flags = txq_flags;
1785 init_port_config(void)
1788 struct rte_port *port;
1790 FOREACH_PORT(pid, ports) {
1792 port->dev_conf.rxmode = rx_mode;
1793 port->dev_conf.fdir_conf = fdir_conf;
1795 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1796 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1798 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1799 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1802 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1803 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1804 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1806 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1809 if (port->dev_info.max_vfs != 0) {
1810 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1811 port->dev_conf.rxmode.mq_mode =
1814 port->dev_conf.rxmode.mq_mode =
1817 port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1820 rxtx_port_config(port);
1822 rte_eth_macaddr_get(pid, &port->eth_addr);
1824 map_port_queue_stats_mapping_registers(pid, port);
1825 #ifdef RTE_NIC_BYPASS
1826 rte_eth_dev_bypass_init(pid);
1831 void set_port_slave_flag(portid_t slave_pid)
1833 struct rte_port *port;
1835 port = &ports[slave_pid];
1836 port->slave_flag = 1;
1839 void clear_port_slave_flag(portid_t slave_pid)
1841 struct rte_port *port;
1843 port = &ports[slave_pid];
1844 port->slave_flag = 0;
1847 uint8_t port_is_bonding_slave(portid_t slave_pid)
1849 struct rte_port *port;
1851 port = &ports[slave_pid];
1852 return port->slave_flag;
1855 const uint16_t vlan_tags[] = {
1856 0, 1, 2, 3, 4, 5, 6, 7,
1857 8, 9, 10, 11, 12, 13, 14, 15,
1858 16, 17, 18, 19, 20, 21, 22, 23,
1859 24, 25, 26, 27, 28, 29, 30, 31
1863 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1864 enum dcb_mode_enable dcb_mode,
1865 enum rte_eth_nb_tcs num_tcs,
1871 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1872 * given above, and the number of traffic classes available for use.
1874 if (dcb_mode == DCB_VT_ENABLED) {
1875 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1876 ð_conf->rx_adv_conf.vmdq_dcb_conf;
1877 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1878 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1880 /* VMDQ+DCB RX and TX configurations */
1881 vmdq_rx_conf->enable_default_pool = 0;
1882 vmdq_rx_conf->default_pool = 0;
1883 vmdq_rx_conf->nb_queue_pools =
1884 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1885 vmdq_tx_conf->nb_queue_pools =
1886 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1888 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1889 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1890 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1891 vmdq_rx_conf->pool_map[i].pools =
1892 1 << (i % vmdq_rx_conf->nb_queue_pools);
1894 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1895 vmdq_rx_conf->dcb_tc[i] = i;
1896 vmdq_tx_conf->dcb_tc[i] = i;
1899 /* set DCB mode of RX and TX of multiple queues */
1900 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1901 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
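/*
 * Example of the resulting DCB+VT mapping: with num_tcs == ETH_4_TCS there
 * are 32 VMDq pools, and pool_map[i] steers VLAN id vlan_tags[i] to pool
 * (i % 32), so e.g. VLAN 5 lands in pool 5.  User priorities 0..7 are mapped
 * one-to-one onto traffic classes on both RX and TX.
 */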
1903 struct rte_eth_dcb_rx_conf *rx_conf =
1904 ð_conf->rx_adv_conf.dcb_rx_conf;
1905 struct rte_eth_dcb_tx_conf *tx_conf =
1906 ð_conf->tx_adv_conf.dcb_tx_conf;
1908 rx_conf->nb_tcs = num_tcs;
1909 tx_conf->nb_tcs = num_tcs;
1911 for (i = 0; i < num_tcs; i++) {
1912 rx_conf->dcb_tc[i] = i;
1913 tx_conf->dcb_tc[i] = i;
1915 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1916 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1917 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1921 eth_conf->dcb_capability_en =
1922 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1924 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1930 init_port_dcb_config(portid_t pid,
1931 enum dcb_mode_enable dcb_mode,
1932 enum rte_eth_nb_tcs num_tcs,
1935 struct rte_eth_conf port_conf;
1936 struct rte_eth_dev_info dev_info;
1937 struct rte_port *rte_port;
1941 rte_eth_dev_info_get(pid, &dev_info);
1943 /* If dev_info.vmdq_pool_base is greater than 0,
1944 * the queue ids of the VMDq pools start after the PF queues.
1946 if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1947 printf("VMDQ_DCB multi-queue mode is nonsensical"
1948 " for port %d.", pid);
1952 /* Assume the ports in testpmd have the same dcb capability
1953 * and the same number of rxq and txq in DCB mode
1955 if (dcb_mode == DCB_VT_ENABLED) {
1956 nb_rxq = dev_info.max_rx_queues;
1957 nb_txq = dev_info.max_tx_queues;
1959 /* if VT is disabled, use all PF queues */
1960 if (dev_info.vmdq_pool_base == 0) {
1961 nb_rxq = dev_info.max_rx_queues;
1962 nb_txq = dev_info.max_tx_queues;
1964 nb_rxq = (queueid_t)num_tcs;
1965 nb_txq = (queueid_t)num_tcs;
1969 rx_free_thresh = 64;
1971 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1972 /* Enter DCB configuration status */
1975 /* set configuration of DCB in VT mode and DCB in non-VT mode */
1976 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1980 rte_port = &ports[pid];
1981 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1983 rxtx_port_config(rte_port);
1985 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1986 for (i = 0; i < RTE_DIM(vlan_tags); i++)
1987 rx_vft_set(pid, vlan_tags[i], 1);
1989 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1990 map_port_queue_stats_mapping_registers(pid, rte_port);
1992 rte_port->dcb_flag = 1;
2002 /* Configuration of Ethernet ports. */
2003 ports = rte_zmalloc("testpmd: ports",
2004 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2005 RTE_CACHE_LINE_SIZE);
2006 if (ports == NULL) {
2007 rte_exit(EXIT_FAILURE,
2008 "rte_zmalloc(%d struct rte_port) failed\n",
2012 /* enable allocated ports */
2013 for (pid = 0; pid < nb_ports; pid++)
2014 ports[pid].enabled = 1;
2025 signal_handler(int signum)
2027 if (signum == SIGINT || signum == SIGTERM) {
2028 printf("\nSignal %d received, preparing to exit...\n",
2031 /* exit with the expected status */
2032 signal(signum, SIG_DFL);
2033 kill(getpid(), signum);
2038 main(int argc, char** argv)
2043 signal(SIGINT, signal_handler);
2044 signal(SIGTERM, signal_handler);
2046 diag = rte_eal_init(argc, argv);
2048 rte_panic("Cannot init EAL\n");
2050 nb_ports = (portid_t) rte_eth_dev_count();
2052 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2054 /* allocate port structures, and init them */
2057 set_def_fwd_config();
2059 rte_panic("Empty set of forwarding logical cores - check the "
2060 "core mask supplied in the command parameters\n");
2065 launch_args_parse(argc, argv);
2067 if (!nb_rxq && !nb_txq)
2068 printf("Warning: Either rx or tx queues should be non-zero\n");
2070 if (nb_rxq > 1 && nb_rxq > nb_txq)
2071 printf("Warning: nb_rxq=%d enables RSS configuration, "
2072 "but nb_txq=%d will prevent to fully test it.\n",
2076 if (start_port(RTE_PORT_ALL) != 0)
2077 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2079 /* set all ports to promiscuous mode by default */
2080 FOREACH_PORT(port_id, ports)
2081 rte_eth_promiscuous_enable(port_id);
2083 #ifdef RTE_LIBRTE_CMDLINE
2084 if (interactive == 1) {
2086 printf("Start automatic packet forwarding\n");
2087 start_packet_forwarding(0);
2096 printf("No commandline core given, start packet forwarding\n");
2097 start_packet_forwarding(0);
2098 printf("Press enter to exit\n");
2099 rc = read(0, &c, 1);