4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
79 #ifdef RTE_LIBRTE_PMD_XENVIRT
80 #include <rte_eth_xenvirt.h>
82 #ifdef RTE_LIBRTE_PDUMP
83 #include <rte_pdump.h>
86 #include <rte_metrics.h>
87 #ifdef RTE_LIBRTE_BITRATE
88 #include <rte_bitrate.h>
90 #ifdef RTE_LIBRTE_LATENCY_STATS
91 #include <rte_latencystats.h>
/*
 * NOTE(review): this chunk was damaged during extraction -- each line is
 * prefixed with a stray original-file line number and many lines (comment
 * delimiters, braces, some initializer fields) are elided.  The code text
 * below is left byte-identical; only comments are added.  Restore from the
 * upstream file before compiling.
 */
/* Run-time configuration state for testpmd, mostly set from the command line. */
96 uint16_t verbose_level = 0; /**< Silent by default. */
98 /* use master core for command line ? */
99 uint8_t interactive = 0;
100 uint8_t auto_start = 0;
102 char cmdline_filename[PATH_MAX] = {0};
105 * NUMA support configuration.
106 * When set, the NUMA support attempts to dispatch the allocation of the
107 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
108 * probed ports among the CPU sockets 0 and 1.
109 * Otherwise, all memory is allocated from CPU socket 0.
111 uint8_t numa_support = 1; /**< numa enabled by default */
114 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
117 uint8_t socket_num = UMA_NO_CONFIG;
120 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
125 * Record the Ethernet address of peer target ports to which packets are
127 * Must be instantiated with the ethernet addresses of peer traffic generator
130 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
131 portid_t nb_peer_eth_addrs = 0;
134 * Probed Target Environment.
136 struct rte_port *ports; /**< For all probed ethernet ports. */
137 portid_t nb_ports; /**< Number of probed ethernet ports. */
138 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
139 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
142 * Test Forwarding Configuration.
143 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
144 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
146 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
147 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
148 portid_t nb_cfg_ports; /**< Number of configured ports. */
149 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
151 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
152 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
154 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
155 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
158 * Forwarding engines.
/* NOTE(review): most entries of this engine table are elided here;
 * cur_fwd_eng below points at the active entry (io mode by default). */
160 struct fwd_engine * fwd_engines[] = {
169 #ifdef RTE_LIBRTE_IEEE1588
170 &ieee1588_fwd_engine,
175 struct fwd_config cur_fwd_config;
176 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
177 uint32_t retry_enabled;
178 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
179 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
181 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
182 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
183 * specified on command-line. */
184 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
186 * Configuration of packet segments used by the "txonly" processing engine.
188 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
189 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
190 TXONLY_DEF_PACKET_LEN,
192 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
194 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
195 /**< Split policy for packets to TX. */
197 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
198 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
200 /* current configuration is in DCB or not,0 means it is not in DCB mode */
201 uint8_t dcb_config = 0;
203 /* Whether the dcb is in testing status */
204 uint8_t dcb_test = 0;
207 * Configurable number of RX/TX queues.
209 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
210 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
213 * Configurable number of RX/TX ring descriptors.
215 #define RTE_TEST_RX_DESC_DEFAULT 128
216 #define RTE_TEST_TX_DESC_DEFAULT 512
217 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
218 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
/* RTE_PMD_PARAM_UNSET (-1) means "not set on the command line; let the
 * driver pick its own default". */
220 #define RTE_PMD_PARAM_UNSET -1
222 * Configurable values of RX and TX ring threshold registers.
225 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
234 * Configurable value of RX free threshold.
236 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
239 * Configurable value of RX drop enable.
241 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
244 * Configurable value of TX free threshold.
246 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
249 * Configurable value of TX RS bit threshold.
251 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
254 * Configurable value of TX queue flags.
256 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
259 * Receive Side Scaling (RSS) configuration.
261 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
264 * Port topology configuration
266 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
269 * Avoids to flush all the RX streams before starts forwarding.
271 uint8_t no_flush_rx = 0; /* flush by default */
274 * Avoids to check link status when starting/stopping a port.
276 uint8_t no_link_check = 0; /* check by default */
279 * Enable link status change notification
281 uint8_t lsc_interrupt = 1; /* enabled by default */
284 * Enable device removal notification.
286 uint8_t rmv_interrupt = 1; /* enabled by default */
289 * Display or mask ether events
290 * Default to all events except VF_MBOX
292 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
293 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
294 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
295 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
296 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
297 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
300 * NIC bypass mode configuration options.
303 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
304 /* The NIC bypass watchdog timeout. */
305 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
309 #ifdef RTE_LIBRTE_LATENCY_STATS
312 * Set when latency stats is enabled in the commandline
314 uint8_t latencystats_enabled;
317 * Lcore ID to serive latency statistics.
319 lcoreid_t latencystats_lcore_id = -1;
324 * Ethernet device configuration.
/* NOTE(review): the closing brace of this initializer (and at least one
 * field, .split_hdr_size in upstream) appears to be elided. */
326 struct rte_eth_rxmode rx_mode = {
327 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
329 .header_split = 0, /**< Header Split disabled. */
330 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
331 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
332 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
333 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
334 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
335 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
/* Flow Director configuration: disabled by default; the nested .mask
 * sub-struct openers/closers are elided in this extraction. */
338 struct rte_fdir_conf fdir_conf = {
339 .mode = RTE_FDIR_MODE_NONE,
340 .pballoc = RTE_FDIR_PBALLOC_64K,
341 .status = RTE_FDIR_REPORT_STATUS,
343 .vlan_tci_mask = 0x0,
345 .src_ip = 0xFFFFFFFF,
346 .dst_ip = 0xFFFFFFFF,
349 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
350 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
352 .src_port_mask = 0xFFFF,
353 .dst_port_mask = 0xFFFF,
354 .mac_addr_byte_mask = 0xFF,
355 .tunnel_type_mask = 1,
356 .tunnel_id_mask = 0xFFFFFFFF,
361 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
/* Per-queue statistics-register mapping tables; presumably filled from
 * the --tx/rx-queue-stats-mapping command-line options -- verify. */
363 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
364 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
366 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
367 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
369 uint16_t nb_tx_queue_stats_mappings = 0;
370 uint16_t nb_rx_queue_stats_mappings = 0;
/* Set of CPU sockets discovered so far; grown by new_socket_id() callers. */
372 unsigned int num_sockets = 0;
373 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
375 #ifdef RTE_LIBRTE_BITRATE
376 /* Bitrate statistics */
377 struct rte_stats_bitrates *bitrate_data;
378 lcoreid_t bitrate_lcore_id;
379 uint8_t bitrate_enabled;
382 /* Forward function declarations */
383 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
384 static void check_all_ports_link_status(uint32_t port_mask);
385 static int eth_event_callback(uint8_t port_id,
386 enum rte_eth_event_type type,
387 void *param, void *ret_param);
390 * Check if all the ports are started.
391 * If yes, return positive value. If not, return zero.
393 static int all_ports_started(void);
396 * Helper function to check if socket is already discovered.
397 * If yes, return positive value. If not, return zero.
/*
 * new_socket_id() fragment: scans socket_ids[0..num_sockets) for socket_id.
 * NOTE(review): return statements, braces and the "static int" line are
 * elided by extraction; upstream returns 0 when found, 1 when new.
 */
400 new_socket_id(unsigned int socket_id)
404 for (i = 0; i < num_sockets; i++) {
405 if (socket_ids[i] == socket_id)
412 * Setup default configuration.
/*
 * Fragment: enumerates all lcore ids, records each newly seen CPU socket
 * in socket_ids[] (aborting via rte_exit() on NUMA-node overflow), and
 * collects the enabled, non-master lcores into fwd_lcores_cpuids[].
 * Several lines (braces, continue statements, nb_lc declaration) are elided.
 */
415 set_default_fwd_lcores_config(void)
419 unsigned int sock_num;
422 for (i = 0; i < RTE_MAX_LCORE; i++) {
423 sock_num = rte_lcore_to_socket_id(i);
424 if (new_socket_id(sock_num)) {
425 if (num_sockets >= RTE_MAX_NUMA_NODES) {
426 rte_exit(EXIT_FAILURE,
427 "Total sockets greater than %u\n",
430 socket_ids[num_sockets++] = sock_num;
432 if (!rte_lcore_is_enabled(i))
434 if (i == rte_get_master_lcore())
436 fwd_lcores_cpuids[nb_lc++] = i;
438 nb_lcores = (lcoreid_t) nb_lc;
439 nb_cfg_lcores = nb_lcores;
/*
 * Fragment: gives every possible peer port a locally-administered MAC
 * address whose last byte is the port index.
 */
444 set_def_peer_eth_addrs(void)
448 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
449 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
450 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Fragment: identity-maps forwarding port ids and marks every probed
 * port as both configured and forwarding.
 */
455 set_default_fwd_ports_config(void)
459 for (pt_id = 0; pt_id < nb_ports; pt_id++)
460 fwd_ports_ids[pt_id] = pt_id;
462 nb_cfg_ports = nb_ports;
463 nb_fwd_ports = nb_ports;
/*
 * Apply the complete default forwarding configuration: logical cores,
 * peer Ethernet addresses and the forwarding port list.
 *
 * NOTE(review): the original span was syntactically broken by extraction
 * (stray line-number prefixes, missing return type and braces); this
 * restores the well-formed function implied by the visible call sequence.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
475 * Configuration initialisation done once at init time.
/*
 * mbuf_pool_create() fragment: builds a per-socket mbuf pool name, then
 * allocates the pool -- via the Xen gntalloc allocator when
 * RTE_LIBRTE_PMD_XENVIRT is set, via rte_mempool_create_empty() +
 * rte_mempool_populate_anon() when anonymous memory is requested, or via
 * plain rte_pktmbuf_pool_create() otherwise.  Fails hard (rte_exit) when
 * no pool could be created.  Braces, the mp_anon_enable condition and
 * several closing lines are elided by extraction.
 */
478 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
479 unsigned int socket_id)
481 char pool_name[RTE_MEMPOOL_NAMESIZE];
482 struct rte_mempool *rte_mp = NULL;
485 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
486 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
489 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
490 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
492 #ifdef RTE_LIBRTE_PMD_XENVIRT
493 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
494 (unsigned) mb_mempool_cache,
495 sizeof(struct rte_pktmbuf_pool_private),
496 rte_pktmbuf_pool_init, NULL,
497 rte_pktmbuf_init, NULL,
501 /* if the former XEN allocation failed fall back to normal allocation */
502 if (rte_mp == NULL) {
504 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
505 mb_size, (unsigned) mb_mempool_cache,
506 sizeof(struct rte_pktmbuf_pool_private),
/* Anonymous-memory path: populate, and free the empty pool on failure. */
511 if (rte_mempool_populate_anon(rte_mp) == 0) {
512 rte_mempool_free(rte_mp);
516 rte_pktmbuf_pool_init(rte_mp, NULL);
517 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
519 /* wrapper to rte_mempool_create() */
520 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
521 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
526 if (rte_mp == NULL) {
527 rte_exit(EXIT_FAILURE,
528 "Creation of mbuf pool for socket %u failed: %s\n",
529 socket_id, rte_strerror(rte_errno));
530 } else if (verbose_level > 0) {
531 rte_mempool_dump(stdout, rte_mp);
536 * Check given socket id is valid or not with NUMA mode,
537 * if valid, return 0, else return -1
/*
 * check_socket_id() fragment: warns once when a socket id was never
 * discovered among the lcores and NUMA support is on.  The return
 * statements and warning_once update are elided by extraction.
 */
540 check_socket_id(const unsigned int socket_id)
542 static int warning_once = 0;
544 if (new_socket_id(socket_id)) {
545 if (!warning_once && numa_support)
546 printf("Warning: NUMA should be configured manually by"
547 " using --port-numa-config and"
548 " --ring-numa-config parameters along with"
/*
 * Interior of init_config() -- the function signature (upstream line ~556)
 * is elided by extraction.  Sequence: zero the NUMA bookkeeping arrays,
 * allocate the fwd_lcores array and one fwd_lcore per logical core, query
 * device info for every probed port and tally ports per CPU socket, size
 * and create the mbuf pools, bind each lcore to the pool of its socket,
 * and finally build the forwarding streams.
 */
560 struct rte_port *port;
561 struct rte_mempool *mbp;
562 unsigned int nb_mbuf_per_pool;
564 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
566 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
569 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
570 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
571 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
574 /* Configuration of logical cores. */
575 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
576 sizeof(struct fwd_lcore *) * nb_lcores,
577 RTE_CACHE_LINE_SIZE);
578 if (fwd_lcores == NULL) {
579 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
580 "failed\n", nb_lcores);
582 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
583 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
584 sizeof(struct fwd_lcore),
585 RTE_CACHE_LINE_SIZE);
586 if (fwd_lcores[lc_id] == NULL) {
587 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
590 fwd_lcores[lc_id]->cpuid_idx = lc_id;
/* Per-port discovery; "port = &ports[pid];" appears to be elided here. */
593 RTE_ETH_FOREACH_DEV(pid) {
595 rte_eth_dev_info_get(pid, &port->dev_info);
598 if (port_numa[pid] != NUMA_NO_CONFIG)
599 port_per_socket[port_numa[pid]]++;
601 uint32_t socket_id = rte_eth_dev_socket_id(pid);
603 /* if socket_id is invalid, set to 0 */
604 if (check_socket_id(socket_id) < 0)
606 port_per_socket[socket_id]++;
610 /* set flag to initialize port/queue */
611 port->need_reconfig = 1;
612 port->need_reconfig_queues = 1;
616 * Create pools of mbuf.
617 * If NUMA support is disabled, create a single pool of mbuf in
618 * socket 0 memory by default.
619 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
621 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
622 * nb_txd can be configured at run time.
624 if (param_total_num_mbufs)
625 nb_mbuf_per_pool = param_total_num_mbufs;
627 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
628 (nb_lcores * mb_mempool_cache) +
629 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
630 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
/* NUMA path: one pool per discovered socket; UMA path: single pool,
 * either on socket 0 or on the user-chosen --socket-num socket. */
636 for (i = 0; i < num_sockets; i++)
637 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
640 if (socket_num == UMA_NO_CONFIG)
641 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
643 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
650 * Records which Mbuf pool to use by each logical core, if needed.
652 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
653 mbp = mbuf_pool_find(
654 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
/* Fall back to the socket-0 pool when the lcore's socket has none. */
657 mbp = mbuf_pool_find(0);
658 fwd_lcores[lc_id]->mbp = mbp;
661 /* Configuration of packet forwarding streams. */
662 if (init_fwd_streams() < 0)
663 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * reconfig() fragment: re-reads device info for one port and flags it for
 * port/queue reinitialization on the given socket.  Return type, braces
 * and the trailing init_port_config() call (upstream) are elided.
 */
670 reconfig(portid_t new_port_id, unsigned socket_id)
672 struct rte_port *port;
674 /* Reconfiguration of Ethernet ports. */
675 port = &ports[new_port_id];
676 rte_eth_dev_info_get(new_port_id, &port->dev_info);
678 /* set flag to initialize port/queue */
679 port->need_reconfig = 1;
680 port->need_reconfig_queues = 1;
681 port->socket_id = socket_id;
/*
 * init_fwd_streams() fragment: validates nb_rxq/nb_txq against each port's
 * capabilities, assigns each port a socket id (NUMA config, device socket,
 * or --socket-num in UMA mode), then (re)allocates the fwd_streams array
 * sized nb_ports * max(nb_rxq, nb_txq).  Early-returns and braces are
 * elided by extraction; upstream returns -1 on validation failure.
 */
688 init_fwd_streams(void)
691 struct rte_port *port;
692 streamid_t sm_id, nb_fwd_streams_new;
695 /* set socket id according to numa or not */
696 RTE_ETH_FOREACH_DEV(pid) {
698 if (nb_rxq > port->dev_info.max_rx_queues) {
699 printf("Fail: nb_rxq(%d) is greater than "
700 "max_rx_queues(%d)\n", nb_rxq,
701 port->dev_info.max_rx_queues);
704 if (nb_txq > port->dev_info.max_tx_queues) {
705 printf("Fail: nb_txq(%d) is greater than "
706 "max_tx_queues(%d)\n", nb_txq,
707 port->dev_info.max_tx_queues);
711 if (port_numa[pid] != NUMA_NO_CONFIG)
712 port->socket_id = port_numa[pid];
714 port->socket_id = rte_eth_dev_socket_id(pid);
716 /* if socket_id is invalid, set to 0 */
717 if (check_socket_id(port->socket_id) < 0)
722 if (socket_num == UMA_NO_CONFIG)
725 port->socket_id = socket_num;
/* Stream count is driven by the larger of the RX and TX queue counts. */
729 q = RTE_MAX(nb_rxq, nb_txq);
731 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
734 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
735 if (nb_fwd_streams_new == nb_fwd_streams)
/* Free the previous stream array before reallocating at the new size. */
738 if (fwd_streams != NULL) {
739 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
740 if (fwd_streams[sm_id] == NULL)
742 rte_free(fwd_streams[sm_id]);
743 fwd_streams[sm_id] = NULL;
745 rte_free(fwd_streams);
750 nb_fwd_streams = nb_fwd_streams_new;
751 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
752 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
753 if (fwd_streams == NULL)
754 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
755 "failed\n", nb_fwd_streams);
757 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
758 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
759 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
760 if (fwd_streams[sm_id] == NULL)
761 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
768 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * pkt_burst_stats_display() fragment: summarizes the burst-size histogram
 * in pbs->pkt_burst_spread[] -- total bursts plus the two most frequent
 * burst sizes -- and prints them as percentages.  The second-place update
 * branch and several closing braces/returns are elided by extraction.
 */
770 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
772 unsigned int total_burst;
773 unsigned int nb_burst;
774 unsigned int burst_stats[3];
775 uint16_t pktnb_stats[3];
777 int burst_percent[3];
780 * First compute the total number of packet bursts and the
781 * two highest numbers of bursts of the same number of packets.
784 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
785 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
786 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
787 nb_burst = pbs->pkt_burst_spread[nb_pkt];
790 total_burst += nb_burst;
/* New maximum: demote the previous leader to second place. */
791 if (nb_burst > burst_stats[0]) {
792 burst_stats[1] = burst_stats[0];
793 pktnb_stats[1] = pktnb_stats[0];
794 burst_stats[0] = nb_burst;
795 pktnb_stats[0] = nb_pkt;
798 if (total_burst == 0)
800 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
801 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
802 burst_percent[0], (int) pktnb_stats[0]);
803 if (burst_stats[0] == total_burst) {
807 if (burst_stats[0] + burst_stats[1] == total_burst) {
808 printf(" + %d%% of %d pkts]\n",
809 100 - burst_percent[0], pktnb_stats[1]);
812 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
813 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
/* Integer rounding can make a bucket 0%; lump the remainder as "others". */
814 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
815 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
818 printf(" + %d%% of %d pkts + %d%% of others]\n",
819 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
821 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * fwd_port_stats_display() fragment: prints per-port forwarding counters,
 * using a wide layout when queue-stats mapping is disabled and a compact
 * one otherwise, with optional checksum-error and per-queue-register
 * detail.  Many format-string continuation lines and braces are elided
 * by extraction.
 */
824 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
826 struct rte_port *port;
829 static const char *fwd_stats_border = "----------------------";
831 port = &ports[port_id];
832 printf("\n %s Forward statistics for port %-2d %s\n",
833 fwd_stats_border, port_id, fwd_stats_border);
/* Layout 1: no queue-stats mapping on either direction. */
835 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
836 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
838 stats->ipackets, stats->imissed,
839 (uint64_t) (stats->ipackets + stats->imissed));
841 if (cur_fwd_eng == &csum_fwd_engine)
842 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
843 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
844 if ((stats->ierrors + stats->rx_nombuf) > 0) {
845 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
846 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
849 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
851 stats->opackets, port->tx_dropped,
852 (uint64_t) (stats->opackets + port->tx_dropped));
/* Layout 2: queue-stats mapping enabled (right-aligned columns). */
855 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
857 stats->ipackets, stats->imissed,
858 (uint64_t) (stats->ipackets + stats->imissed));
860 if (cur_fwd_eng == &csum_fwd_engine)
861 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
862 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
863 if ((stats->ierrors + stats->rx_nombuf) > 0) {
864 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
865 printf(" RX-nombufs: %14"PRIu64"\n",
869 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
871 stats->opackets, port->tx_dropped,
872 (uint64_t) (stats->opackets + port->tx_dropped));
875 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
877 pkt_burst_stats_display("RX",
878 &port->rx_stream->rx_burst_stats);
880 pkt_burst_stats_display("TX",
881 &port->tx_stream->tx_burst_stats);
/* Per-queue statistics registers, when mapping is enabled. */
884 if (port->rx_queue_stats_mapping_enabled) {
886 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
887 printf(" Stats reg %2d RX-packets:%14"PRIu64
888 " RX-errors:%14"PRIu64
889 " RX-bytes:%14"PRIu64"\n",
890 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
894 if (port->tx_queue_stats_mapping_enabled) {
895 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
896 printf(" Stats reg %2d TX-packets:%14"PRIu64
897 " TX-bytes:%14"PRIu64"\n",
898 i, stats->q_opackets[i], stats->q_obytes[i]);
902 printf(" %s--------------------------------%s\n",
903 fwd_stats_border, fwd_stats_border);
/*
 * fwd_stream_stats_display() fragment: prints one stream's counters,
 * skipping streams with no activity; adds checksum counters in csum mode
 * and burst histograms when compiled in.
 */
907 fwd_stream_stats_display(streamid_t stream_id)
909 struct fwd_stream *fs;
910 static const char *fwd_top_stats_border = "-------";
912 fs = fwd_streams[stream_id];
913 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
914 (fs->fwd_dropped == 0))
916 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
917 "TX Port=%2d/Queue=%2d %s\n",
918 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
919 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
920 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
921 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
923 /* if checksum mode */
924 if (cur_fwd_eng == &csum_fwd_engine) {
925 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
926 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
929 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
930 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
931 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * flush_fwd_rx_queues() fragment: drains every RX queue of every
 * forwarding port twice (with a 10 ms pause in between), freeing all
 * received mbufs, so stale packets do not pollute the test; each drain
 * loop is bounded by a 1-second TSC timeout.  The "do {" opener and some
 * braces are elided by extraction.
 */
936 flush_fwd_rx_queues(void)
938 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
945 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
946 uint64_t timer_period;
948 /* convert to number of cycles */
949 timer_period = rte_get_timer_hz(); /* 1 second timeout */
951 for (j = 0; j < 2; j++) {
952 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
953 for (rxq = 0; rxq < nb_rxq; rxq++) {
954 port_id = fwd_ports_ids[rxp];
956 * testpmd can stuck in the below do while loop
957 * if rte_eth_rx_burst() always returns nonzero
958 * packets. So timer is added to exit this loop
959 * after 1sec timer expiry.
961 prev_tsc = rte_rdtsc();
963 nb_rx = rte_eth_rx_burst(port_id, rxq,
964 pkts_burst, MAX_PKT_BURST);
965 for (i = 0; i < nb_rx; i++)
966 rte_pktmbuf_free(pkts_burst[i]);
968 cur_tsc = rte_rdtsc();
969 diff_tsc = cur_tsc - prev_tsc;
970 timer_tsc += diff_tsc;
971 } while ((nb_rx > 0) &&
972 (timer_tsc < timer_period));
976 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * run_pkt_fwd_on_lcore() fragment: the main per-lcore forwarding loop --
 * repeatedly applies pkt_fwd to each of this lcore's streams until
 * fc->stopped is set; optionally triggers once-per-second bitrate
 * calculation and latency-stats updates on the designated lcores.
 * The "do {" opener, loop-variable declarations and some braces are
 * elided by extraction.
 */
981 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
983 struct fwd_stream **fsm;
986 #ifdef RTE_LIBRTE_BITRATE
987 uint64_t tics_per_1sec;
989 uint64_t tics_current;
990 uint8_t idx_port, cnt_ports;
992 cnt_ports = rte_eth_dev_count();
993 tics_datum = rte_rdtsc();
994 tics_per_1sec = rte_get_timer_hz();
996 fsm = &fwd_streams[fc->stream_idx];
997 nb_fs = fc->stream_nb;
999 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1000 (*pkt_fwd)(fsm[sm_id]);
1001 #ifdef RTE_LIBRTE_BITRATE
1002 if (bitrate_enabled != 0 &&
1003 bitrate_lcore_id == rte_lcore_id()) {
1004 tics_current = rte_rdtsc();
1005 if (tics_current - tics_datum >= tics_per_1sec) {
1006 /* Periodic bitrate calculation */
1008 idx_port < cnt_ports;
1010 rte_stats_bitrate_calc(bitrate_data,
1012 tics_datum = tics_current;
1016 #ifdef RTE_LIBRTE_LATENCY_STATS
1017 if (latencystats_enabled != 0 &&
1018 latencystats_lcore_id == rte_lcore_id())
1019 rte_latencystats_update();
1022 } while (! fc->stopped);
/*
 * start_pkt_forward_on_core() fragment: lcore entry point -- runs the
 * currently configured engine's packet_fwd loop on this lcore.  Return
 * type, braces and the trailing "return 0;" are elided by extraction.
 */
1026 start_pkt_forward_on_core(void *fwd_arg)
1028 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1029 cur_fwd_config.fwd_eng->packet_fwd);
1034 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1035 * Used to start communication flows in network loopback test configurations.
/*
 * Fragment: copies the lcore context and marks the copy as already
 * stopped, so the forwarding loop executes exactly one iteration (one
 * TX-only burst) without touching the live lcore state.
 */
1038 run_one_txonly_burst_on_core(void *fwd_arg)
1040 struct fwd_lcore *fwd_lc;
1041 struct fwd_lcore tmp_lcore;
1043 fwd_lc = (struct fwd_lcore *) fwd_arg;
1044 tmp_lcore = *fwd_lc;
1045 tmp_lcore.stopped = 1;
1046 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1051 * Launch packet forwarding:
1052 * - Setup per-port forwarding context.
1053 * - launch logical cores with their forwarding configuration.
/*
 * Fragment: invokes the engine's optional per-port begin hook, then
 * remote-launches the given lcore function on every forwarding lcore
 * (skipping the current lcore in interactive mode, which runs the CLI).
 */
1056 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1058 port_fwd_begin_t port_fwd_begin;
1063 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1064 if (port_fwd_begin != NULL) {
1065 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1066 (*port_fwd_begin)(fwd_ports_ids[i]);
1068 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1069 lc_id = fwd_lcores_cpuids[i];
1070 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1071 fwd_lcores[i]->stopped = 0;
1072 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1073 fwd_lcores[i], lc_id);
1075 printf("launch lcore %u failed - diag=%d\n",
1082 * Launch packet forwarding configuration.
/*
 * start_packet_forwarding() fragment: validates that the queue counts are
 * compatible with the selected engine, that all ports are started and
 * forwarding is not already running; (re)builds forwarding streams,
 * checks DCB constraints, optionally flushes RX queues, snapshots each
 * port's baseline stats and zeroes per-stream counters, then launches
 * forwarding -- optionally preceded by one or more TX-only warm-up bursts.
 * Early returns, braces and several statements are elided by extraction.
 */
1085 start_packet_forwarding(int with_tx_first)
1087 port_fwd_begin_t port_fwd_begin;
1088 port_fwd_end_t port_fwd_end;
1089 struct rte_port *port;
1094 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1095 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1097 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1098 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1100 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1101 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1102 (!nb_rxq || !nb_txq))
1103 rte_exit(EXIT_FAILURE,
1104 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1105 cur_fwd_eng->fwd_mode_name);
1107 if (all_ports_started() == 0) {
1108 printf("Not all ports were started\n");
1111 if (test_done == 0) {
1112 printf("Packet forwarding already started\n");
1116 if (init_fwd_streams() < 0) {
1117 printf("Fail from init_fwd_streams()\n");
/* DCB sanity checks: the guarding "if (dcb_test)" appears to be elided. */
1122 for (i = 0; i < nb_fwd_ports; i++) {
1123 pt_id = fwd_ports_ids[i];
1124 port = &ports[pt_id];
1125 if (!port->dcb_flag) {
1126 printf("In DCB mode, all forwarding ports must "
1127 "be configured in this mode.\n");
1131 if (nb_fwd_lcores == 1) {
1132 printf("In DCB mode,the nb forwarding cores "
1133 "should be larger than 1.\n");
1140 flush_fwd_rx_queues();
1143 pkt_fwd_config_display(&cur_fwd_config);
1144 rxtx_config_display();
/* Snapshot baseline port stats so deltas can be reported at stop time. */
1146 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1147 pt_id = fwd_ports_ids[i];
1148 port = &ports[pt_id];
1149 rte_eth_stats_get(pt_id, &port->stats);
1150 port->tx_dropped = 0;
1152 map_port_queue_stats_mapping_registers(pt_id, port);
1154 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1155 fwd_streams[sm_id]->rx_packets = 0;
1156 fwd_streams[sm_id]->tx_packets = 0;
1157 fwd_streams[sm_id]->fwd_dropped = 0;
1158 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1159 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1161 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1162 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1163 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1164 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1165 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1167 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1168 fwd_streams[sm_id]->core_cycles = 0;
/* Optional warm-up: run the TX-only engine with_tx_first times first. */
1171 if (with_tx_first) {
1172 port_fwd_begin = tx_only_engine.port_fwd_begin;
1173 if (port_fwd_begin != NULL) {
1174 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1175 (*port_fwd_begin)(fwd_ports_ids[i]);
1177 while (with_tx_first--) {
1178 launch_packet_forwarding(
1179 run_one_txonly_burst_on_core);
1180 rte_eal_mp_wait_lcore();
1182 port_fwd_end = tx_only_engine.port_fwd_end;
1183 if (port_fwd_end != NULL) {
1184 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1185 (*port_fwd_end)(fwd_ports_ids[i]);
1188 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop packet forwarding: flag every forwarding lcore to stop, wait for all
 * lcores to return, invoke the forwarding engine's per-port end hook, then
 * display per-stream statistics and the accumulated per-port statistics
 * (RX/TX totals, drops, mbuf-allocation failures).
 * NOTE(review): this is an elided extract -- several original lines
 * (declarations of i/pt_id/lc_id/sm_id, braces, #endif lines) are not
 * visible in this chunk.
 */
1192 stop_packet_forwarding(void)
1194 struct rte_eth_stats stats;
1195 struct rte_port *port;
1196 port_fwd_end_t port_fwd_end;
/* Accumulators summed over all forwarding ports. */
1201 uint64_t total_recv;
1202 uint64_t total_xmit;
1203 uint64_t total_rx_dropped;
1204 uint64_t total_tx_dropped;
1205 uint64_t total_rx_nombuf;
1206 uint64_t tx_dropped;
1207 uint64_t rx_bad_ip_csum;
1208 uint64_t rx_bad_l4_csum;
1209 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1210 uint64_t fwd_cycles;
1212 static const char *acc_stats_border = "+++++++++++++++";
/* Guard: nothing to stop if forwarding was never started. */
1215 printf("Packet forwarding not started\n");
1218 printf("Telling cores to stop...");
/* Each forwarding lcore polls its 'stopped' flag and exits its loop. */
1219 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1220 fwd_lcores[lc_id]->stopped = 1;
1221 printf("\nWaiting for lcores to finish...\n");
1222 rte_eal_mp_wait_lcore();
/* Run the engine's optional per-port teardown hook on every fwd port. */
1223 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1224 if (port_fwd_end != NULL) {
1225 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1226 pt_id = fwd_ports_ids[i];
1227 (*port_fwd_end)(pt_id);
1230 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Fold each stream's counters into its RX/TX ports' counters. */
1233 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1234 if (cur_fwd_config.nb_fwd_streams >
1235 cur_fwd_config.nb_fwd_ports) {
1236 fwd_stream_stats_display(sm_id);
1237 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1238 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1240 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1242 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1245 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1246 tx_dropped = (uint64_t) (tx_dropped +
1247 fwd_streams[sm_id]->fwd_dropped);
1248 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1251 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1252 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1253 fwd_streams[sm_id]->rx_bad_ip_csum);
1254 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1258 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1259 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1260 fwd_streams[sm_id]->rx_bad_l4_csum);
1261 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1264 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1265 fwd_cycles = (uint64_t) (fwd_cycles +
1266 fwd_streams[sm_id]->core_cycles);
1271 total_rx_dropped = 0;
1272 total_tx_dropped = 0;
1273 total_rx_nombuf = 0;
/* Per port: subtract the baseline snapshot taken at forwarding start so
 * 'stats' holds the deltas for this forwarding session only, then reset
 * the snapshot and add the deltas to the session-wide accumulators. */
1274 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1275 pt_id = fwd_ports_ids[i];
1277 port = &ports[pt_id];
1278 rte_eth_stats_get(pt_id, &stats);
1279 stats.ipackets -= port->stats.ipackets;
1280 port->stats.ipackets = 0;
1281 stats.opackets -= port->stats.opackets;
1282 port->stats.opackets = 0;
1283 stats.ibytes -= port->stats.ibytes;
1284 port->stats.ibytes = 0;
1285 stats.obytes -= port->stats.obytes;
1286 port->stats.obytes = 0;
1287 stats.imissed -= port->stats.imissed;
1288 port->stats.imissed = 0;
1289 stats.oerrors -= port->stats.oerrors;
1290 port->stats.oerrors = 0;
1291 stats.rx_nombuf -= port->stats.rx_nombuf;
1292 port->stats.rx_nombuf = 0;
1294 total_recv += stats.ipackets;
1295 total_xmit += stats.opackets;
1296 total_rx_dropped += stats.imissed;
1297 total_tx_dropped += port->tx_dropped;
1298 total_rx_nombuf += stats.rx_nombuf;
1300 fwd_port_stats_display(pt_id, &stats);
1302 printf("\n %s Accumulated forward statistics for all ports"
1304 acc_stats_border, acc_stats_border);
1305 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1307 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1309 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1310 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1311 if (total_rx_nombuf > 0)
1312 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1313 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1315 acc_stats_border, acc_stats_border);
1316 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* NOTE(review): fwd_cycles / total_recv divides by zero when no packets
 * were received during the session -- confirm a total_recv > 0 guard
 * exists in the elided lines, or add one. */
1318 printf("\n CPU cycles/packet=%u (total cycles="
1319 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1320 (unsigned int)(fwd_cycles / total_recv),
1321 fwd_cycles, total_recv);
1323 printf("\nDone.\n");
/* Administratively bring the link of port 'pid' up; print a message on
 * failure (rte_eth_dev_set_link_up returns < 0). */
1328 dev_set_link_up(portid_t pid)
1330 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1331 printf("\nSet link up fail.\n");
/* Administratively bring the link of port 'pid' down; print a message on
 * failure (rte_eth_dev_set_link_down returns < 0). */
1335 dev_set_link_down(portid_t pid)
1337 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1338 printf("\nSet link down fail.\n");
/*
 * Return whether every non-bonding-slave port is in RTE_PORT_STARTED state.
 * Ports flagged as bonding slaves are skipped: they are managed through
 * their bonded master device.
 */
1342 all_ports_started(void)
1345 struct rte_port *port;
1347 RTE_ETH_FOREACH_DEV(pi) {
1349 /* Check if there is a port which is not started */
1350 if ((port->port_status != RTE_PORT_STARTED) &&
1351 (port->slave_flag == 0))
1355 /* No port is not started */
/*
 * Return whether every non-bonding-slave port is in RTE_PORT_STOPPED state.
 * Mirror image of all_ports_started(); bonding slaves are skipped.
 */
1360 all_ports_stopped(void)
1363 struct rte_port *port;
1365 RTE_ETH_FOREACH_DEV(pi) {
1367 if ((port->port_status != RTE_PORT_STOPPED) &&
1368 (port->slave_flag == 0))
/* Return whether a single (valid) port is in RTE_PORT_STARTED state.
 * An invalid port id is rejected first (with a warning). */
1376 port_is_started(portid_t port_id)
1378 if (port_id_is_invalid(port_id, ENABLED_WARN))
1381 if (ports[port_id].port_status != RTE_PORT_STARTED)
/* Return whether a single (valid) port is in RTE_PORT_CLOSED state.
 * An invalid port id is rejected first (with a warning). */
1388 port_is_closed(portid_t port_id)
1390 if (port_id_is_invalid(port_id, ENABLED_WARN))
1393 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * Start one port (or all ports when pid == RTE_PORT_ALL):
 *  - move the port STOPPED -> HANDLING with an atomic CAS (skip if busy),
 *  - (re)configure the device and its RX/TX queues if flagged,
 *  - register event callbacks, start the device, print its MAC address,
 *  - finally move HANDLING -> STARTED and check link status of all ports.
 * On any configuration failure the port is put back to STOPPED and the
 * relevant need_reconfig* flag is re-armed for the next attempt.
 * NOTE(review): elided extract -- error-path braces/returns and some
 * declarations are not visible in this chunk.
 */
1400 start_port(portid_t pid)
1402 int diag, need_check_link_status = -1;
1405 struct rte_port *port;
1406 struct ether_addr mac_addr;
1407 enum rte_eth_event_type event_type;
1409 if (port_id_is_invalid(pid, ENABLED_WARN))
1414 RTE_ETH_FOREACH_DEV(pi) {
1415 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1418 need_check_link_status = 0;
/* CAS guards against concurrent state changes on the same port. */
1420 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1421 RTE_PORT_HANDLING) == 0) {
1422 printf("Port %d is now not stopped\n", pi);
1426 if (port->need_reconfig > 0) {
1427 port->need_reconfig = 0;
1429 printf("Configuring Port %d (socket %u)\n", pi,
1431 /* configure port */
1432 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1435 if (rte_atomic16_cmpset(&(port->port_status),
1436 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1437 printf("Port %d can not be set back "
1438 "to stopped\n", pi);
1439 printf("Fail to configure port %d\n", pi);
1440 /* try to reconfigure port next time */
1441 port->need_reconfig = 1;
1445 if (port->need_reconfig_queues > 0) {
1446 port->need_reconfig_queues = 0;
1447 /* setup tx queues */
1448 for (qi = 0; qi < nb_txq; qi++) {
/* Prefer the per-port NUMA override for the TX ring when set. */
1449 if ((numa_support) &&
1450 (txring_numa[pi] != NUMA_NO_CONFIG))
1451 diag = rte_eth_tx_queue_setup(pi, qi,
1452 nb_txd,txring_numa[pi],
1455 diag = rte_eth_tx_queue_setup(pi, qi,
1456 nb_txd,port->socket_id,
1462 /* Fail to setup tx queue, return */
1463 if (rte_atomic16_cmpset(&(port->port_status),
1465 RTE_PORT_STOPPED) == 0)
1466 printf("Port %d can not be set back "
1467 "to stopped\n", pi);
1468 printf("Fail to configure port %d tx queues\n", pi);
1469 /* try to reconfigure queues next time */
1470 port->need_reconfig_queues = 1;
1473 /* setup rx queues */
1474 for (qi = 0; qi < nb_rxq; qi++) {
/* RX queues additionally need an mbuf pool on the chosen socket. */
1475 if ((numa_support) &&
1476 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1477 struct rte_mempool * mp =
1478 mbuf_pool_find(rxring_numa[pi]);
1480 printf("Failed to setup RX queue:"
1481 "No mempool allocation"
1482 " on the socket %d\n",
1487 diag = rte_eth_rx_queue_setup(pi, qi,
1488 nb_rxd,rxring_numa[pi],
1489 &(port->rx_conf),mp);
1491 struct rte_mempool *mp =
1492 mbuf_pool_find(port->socket_id);
1494 printf("Failed to setup RX queue:"
1495 "No mempool allocation"
1496 " on the socket %d\n",
1500 diag = rte_eth_rx_queue_setup(pi, qi,
1501 nb_rxd,port->socket_id,
1502 &(port->rx_conf), mp);
1507 /* Fail to setup rx queue, return */
1508 if (rte_atomic16_cmpset(&(port->port_status),
1510 RTE_PORT_STOPPED) == 0)
1511 printf("Port %d can not be set back "
1512 "to stopped\n", pi);
1513 printf("Fail to configure port %d rx queues\n", pi);
1514 /* try to reconfigure queues next time */
1515 port->need_reconfig_queues = 1;
/* Register eth_event_callback for every event type on this port. */
1520 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1521 event_type < RTE_ETH_EVENT_MAX;
1523 diag = rte_eth_dev_callback_register(pi,
/* NOTE(review): "even callback" in this message is a typo for
 * "event callback" (string literal, left unchanged here). */
1528 printf("Failed to setup even callback for event %d\n",
1535 if (rte_eth_dev_start(pi) < 0) {
1536 printf("Fail to start port %d\n", pi);
1538 /* Fail to setup rx queue, return */
1539 if (rte_atomic16_cmpset(&(port->port_status),
1540 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1541 printf("Port %d can not be set back to "
1546 if (rte_atomic16_cmpset(&(port->port_status),
1547 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1548 printf("Port %d can not be set into started\n", pi);
1550 rte_eth_macaddr_get(pi, &mac_addr);
1551 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1552 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1553 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1554 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1556 /* at least one port started, need checking link status */
1557 need_check_link_status = 1;
/* -1 means the loop never matched a port; 0 means matched but none
 * could be started (e.g. not stopped first). */
1560 if (need_check_link_status == 1 && !no_link_check)
1561 check_all_ports_link_status(RTE_PORT_ALL);
1562 else if (need_check_link_status == 0)
1563 printf("Please stop the ports first\n");
/*
 * Stop one port (or all ports when pid == RTE_PORT_ALL). A port is skipped
 * if it is still part of the forwarding configuration while a test runs, or
 * if it is a bonding slave (must be removed from the bond first). State is
 * moved STARTED -> HANDLING -> STOPPED via atomic CAS around
 * rte_eth_dev_stop(); link status is re-checked afterwards.
 */
1570 stop_port(portid_t pid)
1573 struct rte_port *port;
1574 int need_check_link_status = 0;
1581 if (port_id_is_invalid(pid, ENABLED_WARN))
1584 printf("Stopping ports...\n");
1586 RTE_ETH_FOREACH_DEV(pi) {
1587 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1590 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1591 printf("Please remove port %d from forwarding configuration.\n", pi);
1595 if (port_is_bonding_slave(pi)) {
1596 printf("Please remove port %d from bonded device.\n", pi);
/* CAS fails (and the port is skipped) unless it is currently STARTED. */
1601 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1602 RTE_PORT_HANDLING) == 0)
1605 rte_eth_dev_stop(pi);
1607 if (rte_atomic16_cmpset(&(port->port_status),
1608 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1609 printf("Port %d can not be set into stopped\n", pi);
1610 need_check_link_status = 1;
1612 if (need_check_link_status && !no_link_check)
1613 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Close one port (or all ports when pid == RTE_PORT_ALL). Same skip rules
 * as stop_port() (forwarding config, bonding slave). Already-closed ports
 * are reported and skipped; otherwise the port moves STOPPED -> HANDLING,
 * any flow rules are flushed, the device is closed, and the state becomes
 * CLOSED.
 */
1619 close_port(portid_t pid)
1622 struct rte_port *port;
1624 if (port_id_is_invalid(pid, ENABLED_WARN))
1627 printf("Closing ports...\n");
1629 RTE_ETH_FOREACH_DEV(pi) {
1630 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1633 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1634 printf("Please remove port %d from forwarding configuration.\n", pi);
1638 if (port_is_bonding_slave(pi)) {
1639 printf("Please remove port %d from bonded device.\n", pi);
/* CAS with equal old/new values is a pure atomic state test. */
1644 if (rte_atomic16_cmpset(&(port->port_status),
1645 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1646 printf("Port %d is already closed\n", pi);
1650 if (rte_atomic16_cmpset(&(port->port_status),
1651 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1652 printf("Port %d is now not stopped\n", pi);
/* Flush rte_flow rules before closing so the PMD can release them. */
1656 if (port->flow_list)
1657 port_flow_flush(pi);
1658 rte_eth_dev_close(pi);
1660 if (rte_atomic16_cmpset(&(port->port_status),
1661 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1662 printf("Port %d cannot be set to closed\n", pi);
/*
 * Hot-plug a new port from a device identifier (PCI address or virtual
 * device name). On success: resolve its NUMA socket (falling back to 0 if
 * invalid), reconfigure testpmd for the new port, enable promiscuous mode,
 * refresh nb_ports and mark the port STOPPED.
 */
1669 attach_port(char *identifier)
1672 unsigned int socket_id;
1674 printf("Attaching a new port...\n");
1676 if (identifier == NULL) {
1677 printf("Invalid parameters are specified\n");
1681 if (rte_eth_dev_attach(identifier, &pi))
1684 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1685 /* if socket_id is invalid, set to 0 */
1686 if (check_socket_id(socket_id) < 0)
1688 reconfig(pi, socket_id);
1689 rte_eth_promiscuous_enable(pi);
1691 nb_ports = rte_eth_dev_count();
1693 ports[pi].port_status = RTE_PORT_STOPPED;
1695 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Hot-unplug a port. The port must already be closed (see close_port());
 * any remaining flow rules are flushed, then rte_eth_dev_detach() removes
 * the device and nb_ports is refreshed.
 */
1700 detach_port(uint8_t port_id)
1702 char name[RTE_ETH_NAME_MAX_LEN];
1704 printf("Detaching a port...\n");
1706 if (!port_is_closed(port_id)) {
1707 printf("Please close port first\n");
1711 if (ports[port_id].flow_list)
1712 port_flow_flush(port_id);
/* 'name' receives the detached device's name for the final message. */
1714 if (rte_eth_dev_detach(port_id, name))
1717 nb_ports = rte_eth_dev_count();
1719 printf("Port '%s' is detached. Now total ports is %d\n",
/* NOTE(review): interior fragment of pmd_test_exit() -- its header is not
 * visible in this extract. Shutdown sequence: stop forwarding, then shut
 * down every known port before exiting. */
1731 stop_packet_forwarding();
1733 if (ports != NULL) {
1735 RTE_ETH_FOREACH_DEV(pt_id) {
1736 printf("\nShutting down port %d...\n", pt_id);
1742 printf("\nBye...\n");
/* A named test command: menu entry name plus the handler invoked for it. */
1745 typedef void (*cmd_func_t)(void);
1746 struct pmd_test_command {
1747 const char *cmd_name;
1748 cmd_func_t cmd_func;
/* Number of entries in the pmd_test_menu table (defined elsewhere). */
1751 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1753 /* Check the link status of all ports in up to 9s, and print them finally */
1755 check_all_ports_link_status(uint32_t port_mask)
1757 #define CHECK_INTERVAL 100 /* 100ms */
1758 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1759 uint8_t portid, count, all_ports_up, print_flag = 0;
1760 struct rte_eth_link link;
1762 printf("Checking link statuses...\n");
/* Poll every CHECK_INTERVAL ms until all masked ports report link up or
 * MAX_CHECK_TIME polls elapse; print the final status exactly once. */
1764 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1766 RTE_ETH_FOREACH_DEV(portid) {
1767 if ((port_mask & (1 << portid)) == 0)
1769 memset(&link, 0, sizeof(link));
1770 rte_eth_link_get_nowait(portid, &link);
1771 /* print link status if flag set */
1772 if (print_flag == 1) {
1773 if (link.link_status)
1774 printf("Port %d Link Up - speed %u "
1775 "Mbps - %s\n", (uint8_t)portid,
1776 (unsigned)link.link_speed,
1777 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* NOTE(review): the '\n' inside "half-duplex\n" is a stray newline --
 * the format string above already ends with '\n', so half-duplex ports
 * print a blank extra line while full-duplex ports do not. Left as-is
 * here (string literal); worth fixing upstream. */
1778 ("full-duplex") : ("half-duplex\n"));
1780 printf("Port %d Link Down\n",
1784 /* clear all_ports_up flag if any link down */
1785 if (link.link_status == ETH_LINK_DOWN) {
1790 /* after finally printing all link status, get out */
1791 if (print_flag == 1)
1794 if (all_ports_up == 0) {
1796 rte_delay_ms(CHECK_INTERVAL)
1799 /* set the print_flag if all ports up or timeout */
1800 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred handler (scheduled via rte_eal_alarm_set from the RMV event)
 * that finishes a device-removal: closes the port, resolves the device
 * name from its devargs (virtual or whitelisted PCI), detaches the device
 * and marks the ethdev slot unused. 'arg' carries the port id packed as a
 * pointer-sized integer.
 */
1810 rmv_event_callback(void *arg)
1812 struct rte_eth_dev *dev;
1813 struct rte_devargs *da;
1815 uint8_t port_id = (intptr_t)arg;
1817 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1818 dev = &rte_eth_devices[port_id];
1819 da = dev->device->devargs;
1822 close_port(port_id);
1823 if (da->type == RTE_DEVTYPE_VIRTUAL)
1824 snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1825 else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1826 rte_pci_device_name(&da->pci.addr, name, sizeof(name));
1827 printf("removing device %s\n", name);
1828 rte_eal_dev_detach(dev->device);
1829 dev->state = RTE_ETH_DEV_UNUSED;
1832 /* This function is used by the interrupt thread */
/*
 * Generic ethdev event callback registered for every event type in
 * start_port(). Validates the event type, optionally prints it (gated by
 * event_print_mask), and for INTR_RMV schedules rmv_event_callback() via a
 * 100 ms EAL alarm -- detaching cannot be done directly from the interrupt
 * thread context.
 */
1834 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
1837 static const char * const event_desc[] = {
1838 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1839 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1840 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1841 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1842 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1843 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1844 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1845 [RTE_ETH_EVENT_MAX] = NULL,
1848 RTE_SET_USED(param);
1849 RTE_SET_USED(ret_param);
1851 if (type >= RTE_ETH_EVENT_MAX) {
1852 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1853 port_id, __func__, type);
1855 } else if (event_print_mask & (UINT32_C(1) << type)) {
1856 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1862 case RTE_ETH_EVENT_INTR_RMV:
1863 if (rte_eal_alarm_set(100000,
1864 rmv_event_callback, (void *)(intptr_t)port_id))
1865 fprintf(stderr, "Could not set up deferred device removal\n");
/*
 * Apply every user-supplied TX queue -> stats-counter mapping that targets
 * this port (and whose queue id is within nb_txq). Marks the port as
 * tx-stats-mapping enabled when at least one mapping was applied.
 */
1874 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1878 uint8_t mapping_found = 0;
1880 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1881 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1882 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1883 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1884 tx_queue_stats_mappings[i].queue_id,
1885 tx_queue_stats_mappings[i].stats_counter_id);
1892 port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX counterpart of set_tx_queue_stats_mapping_registers(): apply every
 * user-supplied RX queue -> stats-counter mapping for this port and flag
 * the port when at least one mapping took effect.
 */
1897 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1901 uint8_t mapping_found = 0;
1903 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1904 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1905 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1906 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1907 rx_queue_stats_mappings[i].queue_id,
1908 rx_queue_stats_mappings[i].stats_counter_id);
1915 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Program both TX and RX queue-stats mappings for one port. -ENOTSUP from
 * the PMD is tolerated (the feature is simply disabled for that direction);
 * any other error is fatal and aborts testpmd via rte_exit().
 */
1920 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1924 diag = set_tx_queue_stats_mapping_registers(pi, port);
1926 if (diag == -ENOTSUP) {
1927 port->tx_queue_stats_mapping_enabled = 0;
1928 printf("TX queue stats mapping not supported port id=%d\n", pi);
1931 rte_exit(EXIT_FAILURE,
1932 "set_tx_queue_stats_mapping_registers "
1933 "failed for port id=%d diag=%d\n",
1937 diag = set_rx_queue_stats_mapping_registers(pi, port);
1939 if (diag == -ENOTSUP) {
1940 port->rx_queue_stats_mapping_enabled = 0;
1941 printf("RX queue stats mapping not supported port id=%d\n", pi);
1944 rte_exit(EXIT_FAILURE,
1945 "set_rx_queue_stats_mapping_registers "
1946 "failed for port id=%d diag=%d\n",
/*
 * Initialize a port's RX/TX queue configuration from the PMD defaults in
 * dev_info, then override each threshold/flag with the corresponding
 * command-line value when it was explicitly set (i.e. differs from
 * RTE_PMD_PARAM_UNSET).
 */
1952 rxtx_port_config(struct rte_port *port)
1954 port->rx_conf = port->dev_info.default_rxconf;
1955 port->tx_conf = port->dev_info.default_txconf;
1957 /* Check if any RX/TX parameters have been passed */
1958 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1959 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1961 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1962 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1964 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1965 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1967 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1968 port->rx_conf.rx_free_thresh = rx_free_thresh;
1970 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1971 port->rx_conf.rx_drop_en = rx_drop_en;
1973 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1974 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1976 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1977 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1979 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1980 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1982 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1983 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1985 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1986 port->tx_conf.tx_free_thresh = tx_free_thresh;
1988 if (txq_flags != RTE_PMD_PARAM_UNSET)
1989 port->tx_conf.txq_flags = txq_flags;
/*
 * Build the default configuration for every probed port: RX mode, flow
 * director, RSS hash (only meaningful with >1 RX queue -- the second
 * branch clears rss_hf), multi-queue mode (RSS vs none when DCB is off),
 * queue thresholds, MAC address, queue-stats mappings, and LSC/RMV
 * interrupt flags when both requested and supported by the device.
 */
1993 init_port_config(void)
1996 struct rte_port *port;
1998 RTE_ETH_FOREACH_DEV(pid) {
2000 port->dev_conf.rxmode = rx_mode;
2001 port->dev_conf.fdir_conf = fdir_conf;
2003 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2004 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2006 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2007 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2010 if (port->dcb_flag == 0) {
2011 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2012 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2014 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2017 rxtx_port_config(port);
2019 rte_eth_macaddr_get(pid, &port->eth_addr);
2021 map_port_queue_stats_mapping_registers(pid, port);
2022 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2023 rte_pmd_ixgbe_bypass_init(pid);
/* Enable link-state-change / removal interrupts only when the device
 * advertises support via its dev_flags. */
2026 if (lsc_interrupt &&
2027 (rte_eth_devices[pid].data->dev_flags &
2028 RTE_ETH_DEV_INTR_LSC))
2029 port->dev_conf.intr_conf.lsc = 1;
2030 if (rmv_interrupt &&
2031 (rte_eth_devices[pid].data->dev_flags &
2032 RTE_ETH_DEV_INTR_RMV))
2033 port->dev_conf.intr_conf.rmv = 1;
/* Mark a port as a bonding slave so start/stop/close logic skips it. */
2037 void set_port_slave_flag(portid_t slave_pid)
2039 struct rte_port *port;
2041 port = &ports[slave_pid];
2042 port->slave_flag = 1;
/* Clear a port's bonding-slave mark (port is managed directly again). */
2045 void clear_port_slave_flag(portid_t slave_pid)
2047 struct rte_port *port;
2049 port = &ports[slave_pid];
2050 port->slave_flag = 0;
/* Return non-zero when the port is currently flagged as a bonding slave. */
2053 uint8_t port_is_bonding_slave(portid_t slave_pid)
2055 struct rte_port *port;
2057 port = &ports[slave_pid];
2058 return port->slave_flag;
/* VLAN ids 0..31 used to populate the VMDQ+DCB pool map and the per-port
 * VLAN filter table in the DCB configuration paths below. */
2061 const uint16_t vlan_tags[] = {
2062 0, 1, 2, 3, 4, 5, 6, 7,
2063 8, 9, 10, 11, 12, 13, 14, 15,
2064 16, 17, 18, 19, 20, 21, 22, 23,
2065 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill 'eth_conf' for DCB operation. Two modes:
 *  - DCB_VT_ENABLED: VMDQ+DCB -- map the vlan_tags[] array onto queue
 *    pools (32 pools for 4 TCs, 16 pools for 8 TCs) and an identity
 *    priority->TC table; mq_mode becomes VMDQ_DCB on both directions.
 *  - otherwise: plain DCB -- priorities spread round-robin over num_tcs
 *    traffic classes, RX combined with RSS (DCB_RSS mode).
 * Finally advertise priority-group support, plus PFC support when pfc_en
 * is set (the pfc_en test itself is in elided lines).
 */
2069 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2070 enum dcb_mode_enable dcb_mode,
2071 enum rte_eth_nb_tcs num_tcs,
2077 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2078 * given above, and the number of traffic classes available for use.
2080 if (dcb_mode == DCB_VT_ENABLED) {
2081 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2082 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2083 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2084 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2086 /* VMDQ+DCB RX and TX configurations */
2087 vmdq_rx_conf->enable_default_pool = 0;
2088 vmdq_rx_conf->default_pool = 0;
2089 vmdq_rx_conf->nb_queue_pools =
2090 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2091 vmdq_tx_conf->nb_queue_pools =
2092 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2094 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2095 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2096 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2097 vmdq_rx_conf->pool_map[i].pools =
2098 1 << (i % vmdq_rx_conf->nb_queue_pools);
2100 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2101 vmdq_rx_conf->dcb_tc[i] = i;
2102 vmdq_tx_conf->dcb_tc[i] = i;
2105 /* set DCB mode of RX and TX of multiple queues */
2106 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2107 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2109 struct rte_eth_dcb_rx_conf *rx_conf =
2110 &eth_conf->rx_adv_conf.dcb_rx_conf;
2111 struct rte_eth_dcb_tx_conf *tx_conf =
2112 &eth_conf->tx_adv_conf.dcb_tx_conf;
2114 rx_conf->nb_tcs = num_tcs;
2115 tx_conf->nb_tcs = num_tcs;
2117 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2118 rx_conf->dcb_tc[i] = i % num_tcs;
2119 tx_conf->dcb_tc[i] = i % num_tcs;
2121 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2122 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2123 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2127 eth_conf->dcb_capability_en =
2128 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2130 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * (Re)configure one port for DCB mode:
 *  - build a DCB rte_eth_conf via get_eth_dcb_conf() and enable VLAN
 *    filtering,
 *  - apply it with 0 RX/TX queues (queues are set up later by start_port),
 *  - derive nb_rxq/nb_txq from the device capabilities: in VT mode from
 *    the VF/queue limits, otherwise all PF queues or num_tcs queues
 *    depending on whether a VMDQ pool base exists,
 *  - program the VLAN filter with every vlan_tags[] entry, refresh the MAC
 *    address and stats mappings, and set the port's dcb_flag.
 * VT mode is rejected when the device reports vmdq_pool_base > 0 (queue
 * layout would clash with PF queues).
 */
2136 init_port_dcb_config(portid_t pid,
2137 enum dcb_mode_enable dcb_mode,
2138 enum rte_eth_nb_tcs num_tcs,
2141 struct rte_eth_conf port_conf;
2142 struct rte_port *rte_port;
2146 rte_port = &ports[pid];
2148 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2149 /* Enter DCB configuration status */
2152 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2153 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2156 port_conf.rxmode.hw_vlan_filter = 1;
2159 * Write the configuration into the device.
2160 * Set the numbers of RX & TX queues to 0, so
2161 * the RX & TX queues will not be setup.
2163 (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2165 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2167 /* If dev_info.vmdq_pool_base is greater than 0,
2168 * the queue id of vmdq pools is started after pf queues.
2170 if (dcb_mode == DCB_VT_ENABLED &&
2171 rte_port->dev_info.vmdq_pool_base > 0) {
2172 printf("VMDQ_DCB multi-queue mode is nonsensical"
2173 " for port %d.", pid);
2177 /* Assume the ports in testpmd have the same dcb capability
2178 * and has the same number of rxq and txq in dcb mode
2180 if (dcb_mode == DCB_VT_ENABLED) {
2181 if (rte_port->dev_info.max_vfs > 0) {
2182 nb_rxq = rte_port->dev_info.nb_rx_queues;
2183 nb_txq = rte_port->dev_info.nb_tx_queues;
2185 nb_rxq = rte_port->dev_info.max_rx_queues;
2186 nb_txq = rte_port->dev_info.max_tx_queues;
2189 /*if vt is disabled, use all pf queues */
2190 if (rte_port->dev_info.vmdq_pool_base == 0) {
2191 nb_rxq = rte_port->dev_info.max_rx_queues;
2192 nb_txq = rte_port->dev_info.max_tx_queues;
2194 nb_rxq = (queueid_t)num_tcs;
2195 nb_txq = (queueid_t)num_tcs;
2199 rx_free_thresh = 64;
2201 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2203 rxtx_port_config(rte_port);
2205 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
/* Accept every VLAN id used by the VMDQ pool map. */
2206 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2207 rx_vft_set(pid, vlan_tags[i], 1);
2209 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2210 map_port_queue_stats_mapping_registers(pid, rte_port);
2212 rte_port->dcb_flag = 1;
/* NOTE(review): interior fragment of init_port() -- its header is not
 * visible in this extract. Allocates the zero-initialized, cache-aligned
 * array of per-port state for the maximum possible port count; allocation
 * failure is fatal. */
2220 /* Configuration of Ethernet ports. */
2221 ports = rte_zmalloc("testpmd: ports",
2222 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2223 RTE_CACHE_LINE_SIZE);
2224 if (ports == NULL) {
2225 rte_exit(EXIT_FAILURE,
2226 "rte_zmalloc(%d struct rte_port) failed\n",
/* NOTE(review): interior fragment of print_stats() -- its header is not
 * visible in this extract. Emits ANSI escape sequences (ESC[2J = clear
 * screen, ESC[1;1H = cursor home) then shows NIC stats for every
 * forwarding port. */
2242 const char clr[] = { 27, '[', '2', 'J', '\0' };
2243 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2245 /* Clear screen and move to top left */
2246 printf("%s%s", clr, top_left);
2248 printf("\nPort statistics ====================================");
2249 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2250 nic_stats_display(fwd_ports_ids[i]);
/*
 * SIGINT/SIGTERM handler: tear down optional pdump and latency-stats
 * subsystems, then restore the default disposition and re-raise the signal
 * so the process exits with the conventional signal-death status.
 */
2254 signal_handler(int signum)
2256 if (signum == SIGINT || signum == SIGTERM) {
2257 printf("\nSignal %d received, preparing to exit...\n",
2259 #ifdef RTE_LIBRTE_PDUMP
2260 /* uninitialize packet capture framework */
2263 #ifdef RTE_LIBRTE_LATENCY_STATS
2264 rte_latencystats_uninit();
2267 /* exit with the expected status */
2268 signal(signum, SIG_DFL);
2269 kill(getpid(), signum);
2274 main(int argc, char** argv)
2279 signal(SIGINT, signal_handler);
2280 signal(SIGTERM, signal_handler);
2282 diag = rte_eal_init(argc, argv);
2284 rte_panic("Cannot init EAL\n");
2286 #ifdef RTE_LIBRTE_PDUMP
2287 /* initialize packet capture framework */
2288 rte_pdump_init(NULL);
2291 nb_ports = (portid_t) rte_eth_dev_count();
2293 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2295 /* allocate port structures, and init them */
2298 set_def_fwd_config();
2300 rte_panic("Empty set of forwarding logical cores - check the "
2301 "core mask supplied in the command parameters\n");
2303 /* Bitrate/latency stats disabled by default */
2304 #ifdef RTE_LIBRTE_BITRATE
2305 bitrate_enabled = 0;
2307 #ifdef RTE_LIBRTE_LATENCY_STATS
2308 latencystats_enabled = 0;
2314 launch_args_parse(argc, argv);
2316 if (tx_first && interactive)
2317 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2318 "interactive mode.\n");
2319 if (!nb_rxq && !nb_txq)
2320 printf("Warning: Either rx or tx queues should be non-zero\n");
2322 if (nb_rxq > 1 && nb_rxq > nb_txq)
2323 printf("Warning: nb_rxq=%d enables RSS configuration, "
2324 "but nb_txq=%d will prevent to fully test it.\n",
2328 if (start_port(RTE_PORT_ALL) != 0)
2329 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2331 /* set all ports to promiscuous mode by default */
2332 RTE_ETH_FOREACH_DEV(port_id)
2333 rte_eth_promiscuous_enable(port_id);
2335 /* Init metrics library */
2336 rte_metrics_init(rte_socket_id());
2338 #ifdef RTE_LIBRTE_LATENCY_STATS
2339 if (latencystats_enabled != 0) {
2340 int ret = rte_latencystats_init(1, NULL);
2342 printf("Warning: latencystats init()"
2343 " returned error %d\n", ret);
2344 printf("Latencystats running on lcore %d\n",
2345 latencystats_lcore_id);
2349 /* Setup bitrate stats */
2350 #ifdef RTE_LIBRTE_BITRATE
2351 if (bitrate_enabled != 0) {
2352 bitrate_data = rte_stats_bitrate_create();
2353 if (bitrate_data == NULL)
2354 rte_exit(EXIT_FAILURE,
2355 "Could not allocate bitrate data.\n");
2356 rte_stats_bitrate_reg(bitrate_data);
2360 #ifdef RTE_LIBRTE_CMDLINE
2361 if (strlen(cmdline_filename) != 0)
2362 cmdline_read_from_file(cmdline_filename);
2364 if (interactive == 1) {
2366 printf("Start automatic packet forwarding\n");
2367 start_packet_forwarding(0);
2377 printf("No commandline core given, start packet forwarding\n");
2378 start_packet_forwarding(tx_first);
2379 if (stats_period != 0) {
2380 uint64_t prev_time = 0, cur_time, diff_time = 0;
2381 uint64_t timer_period;
2383 /* Convert to number of cycles */
2384 timer_period = stats_period * rte_get_timer_hz();
2387 cur_time = rte_get_timer_cycles();
2388 diff_time += cur_time - prev_time;
2390 if (diff_time >= timer_period) {
2392 /* Reset the timer */
2395 /* Sleep to avoid unnecessary checks */
2396 prev_time = cur_time;
2401 printf("Press enter to exit\n");
2402 rc = read(0, &c, 1);