/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No NUMA support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 unless --socket-num
 * specifies another socket.
 */
uint8_t socket_num = UMA_NO_CONFIG;
/*
 * Use ANONYMOUS mapped memory (might be not physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed Ethernet ports. */
portid_t nb_ports;             /**< Number of probed Ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
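/*
 * Worked example (illustrative): with 2 forwarding ports and nb_rxq = 4
 * (and nb_txq <= 4), init_fwd_streams() below allocates 2 * 4 = 8 streams,
 * one per (port, RX queue) pair.
 */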
/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_retry_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< Number of mbufs in all pools - if
				      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets. */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */
/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
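/*
 * The hash types can be widened beyond IP at run time, either interactively
 * ("port config all rss all" in the testpmd shell) or programmatically by
 * OR-ing additional ETH_RSS_* bits; an illustrative example only:
 *
 *	rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP;
 */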
/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default. */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* Flush by default. */

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* Check by default. */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
volatile int test_done = 1; /* Stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned int max_socket = 0;
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Find next enabled port.
 */
portid_t
find_next_port(portid_t p, struct rte_port *ports, int size)
{
	if (ports == NULL)
		rte_exit(-EINVAL, "failed to find a next port id\n");

	while ((p < size) && (ports[p].enabled == 0))
		p++;
	return p;
}
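/*
 * FOREACH_PORT() (defined in testpmd.h) walks only the enabled ports by
 * chaining find_next_port() above. A minimal sketch of the idea
 * (illustrative, not the authoritative definition):
 *
 *	#define FOREACH_PORT(p, ports) \
 *		for ((p) = find_next_port(0, (ports), RTE_MAX_ETHPORTS); \
 *		     (p) < RTE_MAX_ETHPORTS; \
 *		     (p) = find_next_port((p) + 1, (ports), RTE_MAX_ETHPORTS))
 */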
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
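/*
 * mbuf_poolname_build() (declared in testpmd.h) derives a unique per-socket
 * pool name, which mbuf_pool_find() can later resolve back to the pool with
 * rte_mempool_lookup(). A minimal sketch of such a helper, assuming a
 * "mbuf_pool_socket_<id>" naming convention (illustrative only):
 *
 *	static inline void
 *	mbuf_poolname_build(unsigned int sock_id, char *mp_name, int name_size)
 *	{
 *		snprintf(mp_name, name_size, "mbuf_pool_socket_%u", sock_id);
 *	}
 */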
/*
 * Check if the given socket id is valid in NUMA mode;
 * if valid, return 0, else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbufs.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						 nb_mbuf, i);
		}
	}

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
static void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:%14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t  port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
}
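/*
 * Each forwarding engine plugs a packet_fwd_t callback into the loop above.
 * The sketch below shows the shape of such a callback in the spirit of the
 * "io" engine (the real engines live in iofwd.c and friends; this one is
 * illustrative only, hence the unused attribute).
 */
static __attribute__((unused)) void
pkt_burst_io_forward_sketch(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint16_t nb_rx;
	uint16_t nb_tx;

	/* Receive a burst of packets on the stream's RX port/queue. */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
				 pkts_burst, nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;
	fs->rx_packets += nb_rx;

	/* Retransmit the burst unchanged on the TX port/queue. */
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
				 pkts_burst, nb_rx);
	fs->tx_packets += nb_tx;

	/* Free whatever the TX queue did not accept. */
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
}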
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	rxtx_config_display();
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}
	/* No port is not started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
		    (port->slave_flag == 0))
			return 0;
	}
	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;
	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;
	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;
	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;
	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		port = &ports[pi];
		need_check_link_status = 0;
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
			       port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
						       "No mempool allocation "
						       "on the socket %d\n",
						       rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf),
						mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
				       "stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
		       mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
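/*
 * The canonical DPDK port bring-up order used by start_port() above is:
 * configure the device, set up each TX and RX queue, then start the port.
 * A stripped-down sketch of that sequence for a single port with one
 * queue pair (illustrative; error handling and NUMA placement omitted):
 *
 *	struct rte_eth_conf conf;
 *	memset(&conf, 0, sizeof(conf));
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *			       rte_eth_dev_socket_id(port_id), NULL);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *			       rte_eth_dev_socket_id(port_id), NULL,
 *			       mbuf_pool_find(0));
 *	rte_eth_dev_start(port_id);
 */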
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
void
attach_port(char *identifier)
{
	portid_t i, j, pi = 0;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	ports[pi].enabled = 1;
	reconfig(pi, rte_eth_dev_socket_id(pi));
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	/* set_default_fwd_ports_config(); */
	memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
	i = 0;
	FOREACH_PORT(j, ports) {
		fwd_ports_ids[i] = j;
		i++;
	}
	nb_cfg_ports = nb_ports;
	nb_fwd_ports++;

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	portid_t i, pi;

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (rte_eth_dev_detach(port_id, name))
		return;

	ports[port_id].enabled = 0;
	nb_ports = rte_eth_dev_count();

	/* set_default_fwd_ports_config(); */
	memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
	i = 0;
	FOREACH_PORT(pi, ports) {
		fwd_ports_ids[i] = pi;
		i++;
	}
	nb_cfg_ports = nb_ports;
	nb_fwd_ports--;

	printf("Port '%s' is detached. Now total ports is %d\n",
	       name, nb_ports);
	printf("Done\n");
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		FOREACH_PORT(pt_id, ports) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
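/*
 * PMD_TEST_CMD_NB sizes the pmd_test_menu[] dispatch table (elided here),
 * which maps command names to handlers. A minimal hypothetical entry
 * (illustrative only) would look like:
 *
 *	static struct pmd_test_command pmd_test_menu[] = {
 *		{ "exit", pmd_test_exit },
 *	};
 */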
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		FOREACH_PORT(portid, ports) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
					       "Mbps - %s\n", (uint8_t)portid,
					       (unsigned)link.link_speed,
					       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					       ("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
					       (uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_tx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_rx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		if (port->dev_info.max_vfs != 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_VMDQ_RSS;
			else
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_NONE;

			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < num_tcs; i++) {
			rx_conf->dcb_tc[i] = i;
			tx_conf->dcb_tc[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_eth_dev_info_get(pid, &dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
		       " for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		nb_rxq = dev_info.max_rx_queues;
		nb_txq = dev_info.max_tx_queues;
	} else {
		/* if vt is disabled, use all pf queues */
		if (dev_info.vmdq_pool_base == 0) {
			nb_rxq = dev_info.max_rx_queues;
			nb_txq = dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	portid_t pid;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}

	/* enable allocated ports */
	for (pid = 0; pid < nb_ports; pid++)
		ports[pid].enabled = 1;
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
		       signum);
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char **argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}