4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
79 #ifdef RTE_LIBRTE_PMD_XENVIRT
80 #include <rte_eth_xenvirt.h>
82 #ifdef RTE_LIBRTE_PDUMP
83 #include <rte_pdump.h>
86 #include <rte_metrics.h>
87 #ifdef RTE_LIBRTE_BITRATE
88 #include <rte_bitrate.h>
90 #ifdef RTE_LIBRTE_LATENCY_STATS
91 #include <rte_latencystats.h>
97 uint16_t verbose_level = 0; /**< Silent by default. */
99 /* use the master core for the command line? */
100 uint8_t interactive = 0;
101 uint8_t auto_start = 0;
103 char cmdline_filename[PATH_MAX] = {0};
106 * NUMA support configuration.
107 * When set, NUMA support attempts to dispatch the allocation of the
108 * RX and TX memory rings, and of the DMA memory buffers (mbufs) of the
109 * probed ports, among the CPU sockets 0 and 1.
110 * Otherwise, all memory is allocated from CPU socket 0.
112 uint8_t numa_support = 1; /**< numa enabled by default */
115 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
116 * not specified.
118 uint8_t socket_num = UMA_NO_CONFIG;
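/*
 * Note (hedged): in practice these knobs are driven from the command line:
 * --socket-num forces UMA allocation on a given socket, while
 * --port-numa-config and --ring-numa-config (see the warning printed by
 * check_socket_id() below) map ports and rings to sockets explicitly.
 * When none of them are given and NUMA support is enabled, the set of
 * sockets is discovered from the enabled lcores in
 * set_default_fwd_lcores_config().
 */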
121 * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
126 * Record the Ethernet address of peer target ports to which packets are
127 * forwarded.
128 * Must be instantiated with the Ethernet addresses of peer traffic generator
129 * ports.
131 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
132 portid_t nb_peer_eth_addrs = 0;
135 * Probed Target Environment.
137 struct rte_port *ports; /**< For all probed ethernet ports. */
138 portid_t nb_ports; /**< Number of probed ethernet ports. */
139 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
140 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
143 * Test Forwarding Configuration.
144 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
145 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
147 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
148 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
149 portid_t nb_cfg_ports; /**< Number of configured ports. */
150 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
152 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
153 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
155 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
156 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
159 * Forwarding engines.
161 struct fwd_engine * fwd_engines[] = {
170 #ifdef RTE_LIBRTE_IEEE1588
171 &ieee1588_fwd_engine,
176 struct fwd_config cur_fwd_config;
177 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
178 uint32_t retry_enabled;
179 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
180 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
182 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
183 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
184 * specified on command-line. */
185 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
187 * Configuration of packet segments used by the "txonly" processing engine.
189 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
190 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
191 TXONLY_DEF_PACKET_LEN,
193 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
195 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
196 /**< Split policy for packets to TX. */
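/*
 * Illustrative example (assuming the usual testpmd runtime command, which
 * lives in cmdline.c and may differ between versions): by default txonly
 * sends single-segment packets of TXONLY_DEF_PACKET_LEN bytes; a command
 * such as
 *
 *   testpmd> set txpkts 64,64
 *
 * would result in tx_pkt_nb_segs = 2, tx_pkt_seg_lengths = {64, 64} and
 * tx_pkt_length = 128.
 */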
198 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
199 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
201 /* whether the current configuration is in DCB mode; 0 means it is not */
202 uint8_t dcb_config = 0;
204 /* whether DCB test mode is active */
205 uint8_t dcb_test = 0;
208 * Configurable number of RX/TX queues.
210 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
211 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
214 * Configurable number of RX/TX ring descriptors.
216 #define RTE_TEST_RX_DESC_DEFAULT 128
217 #define RTE_TEST_TX_DESC_DEFAULT 512
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
221 #define RTE_PMD_PARAM_UNSET -1
223 * Configurable values of RX and TX ring threshold registers.
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
235 * Configurable value of RX free threshold.
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
240 * Configurable value of RX drop enable.
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
245 * Configurable value of TX free threshold.
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
250 * Configurable value of TX RS bit threshold.
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
255 * Configurable value of TX queue flags.
257 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
260 * Receive Side Scaling (RSS) configuration.
262 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
265 * Port topology configuration
267 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
270 * Avoid flushing all the RX streams before starting forwarding.
272 uint8_t no_flush_rx = 0; /* flush by default */
275 * Flow API isolated mode.
277 uint8_t flow_isolate_all;
280 * Avoid checking the link status when starting/stopping a port.
282 uint8_t no_link_check = 0; /* check by default */
285 * Enable link status change notification
287 uint8_t lsc_interrupt = 1; /* enabled by default */
290 * Enable device removal notification.
292 uint8_t rmv_interrupt = 1; /* enabled by default */
295 * Display or mask Ethernet device events.
296 * Defaults to all events except VF_MBOX.
298 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
299 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
300 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
301 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
302 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
303 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
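/*
 * Minimal sketch (hypothetical helper, not used elsewhere in this file):
 * how the event_print_mask bitmap above is queried, mirroring the test
 * performed in eth_event_callback() below.
 */
static inline int
event_type_is_displayed(enum rte_eth_event_type type)
{
	return (event_print_mask & (UINT32_C(1) << type)) != 0;
}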
306 * NIC bypass mode configuration options.
309 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
310 /* The NIC bypass watchdog timeout. */
311 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
315 #ifdef RTE_LIBRTE_LATENCY_STATS
318 * Set when latency stats are enabled on the command line.
320 uint8_t latencystats_enabled;
323 * Lcore ID to serve latency statistics.
325 lcoreid_t latencystats_lcore_id = -1;
330 * Ethernet device configuration.
332 struct rte_eth_rxmode rx_mode = {
333 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
335 .header_split = 0, /**< Header Split disabled. */
336 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
337 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
338 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
339 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
340 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
341 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
344 struct rte_fdir_conf fdir_conf = {
345 .mode = RTE_FDIR_MODE_NONE,
346 .pballoc = RTE_FDIR_PBALLOC_64K,
347 .status = RTE_FDIR_REPORT_STATUS,
349 .vlan_tci_mask = 0x0,
351 .src_ip = 0xFFFFFFFF,
352 .dst_ip = 0xFFFFFFFF,
355 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
356 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
358 .src_port_mask = 0xFFFF,
359 .dst_port_mask = 0xFFFF,
360 .mac_addr_byte_mask = 0xFF,
361 .tunnel_type_mask = 1,
362 .tunnel_id_mask = 0xFFFFFFFF,
367 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
369 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
370 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
372 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
373 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
375 uint16_t nb_tx_queue_stats_mappings = 0;
376 uint16_t nb_rx_queue_stats_mappings = 0;
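/*
 * Note (assumption: option names as documented for testpmd): these mapping
 * arrays are filled from the --tx-queue-stats-mapping and
 * --rx-queue-stats-mapping command-line options and consumed by
 * map_port_queue_stats_mapping_registers() when a port is initialized.
 */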
378 unsigned int num_sockets = 0;
379 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
381 #ifdef RTE_LIBRTE_BITRATE
382 /* Bitrate statistics */
383 struct rte_stats_bitrates *bitrate_data;
384 lcoreid_t bitrate_lcore_id;
385 uint8_t bitrate_enabled;
388 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
390 /* Forward function declarations */
391 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
392 static void check_all_ports_link_status(uint32_t port_mask);
393 static int eth_event_callback(uint8_t port_id,
394 enum rte_eth_event_type type,
395 void *param, void *ret_param);
398 * Check if all the ports are started.
399 * If yes, return positive value. If not, return zero.
401 static int all_ports_started(void);
404 * Helper function to check whether the given socket ID is new (not yet discovered).
405 * If it is new, return a positive value. Otherwise, return zero.
408 new_socket_id(unsigned int socket_id)
412 for (i = 0; i < num_sockets; i++) {
413 if (socket_ids[i] == socket_id)
420 * Setup default configuration.
423 set_default_fwd_lcores_config(void)
427 unsigned int sock_num;
430 for (i = 0; i < RTE_MAX_LCORE; i++) {
431 sock_num = rte_lcore_to_socket_id(i);
432 if (new_socket_id(sock_num)) {
433 if (num_sockets >= RTE_MAX_NUMA_NODES) {
434 rte_exit(EXIT_FAILURE,
435 "Total sockets greater than %u\n",
438 socket_ids[num_sockets++] = sock_num;
440 if (!rte_lcore_is_enabled(i))
442 if (i == rte_get_master_lcore())
444 fwd_lcores_cpuids[nb_lc++] = i;
446 nb_lcores = (lcoreid_t) nb_lc;
447 nb_cfg_lcores = nb_lcores;
452 set_def_peer_eth_addrs(void)
456 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
457 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
458 peer_eth_addrs[i].addr_bytes[5] = i;
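/*
 * Example (assuming ETHER_LOCAL_ADMIN_ADDR is the locally administered
 * 0x02 octet): since the array is zero-initialized, port 3 gets the
 * default peer address 02:00:00:00:00:03 unless explicitly overridden.
 */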
463 set_default_fwd_ports_config(void)
467 for (pt_id = 0; pt_id < nb_ports; pt_id++)
468 fwd_ports_ids[pt_id] = pt_id;
470 nb_cfg_ports = nb_ports;
471 nb_fwd_ports = nb_ports;
475 set_def_fwd_config(void)
477 set_default_fwd_lcores_config();
478 set_def_peer_eth_addrs();
479 set_default_fwd_ports_config();
483 * Configuration initialisation done once at init time.
486 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
487 unsigned int socket_id)
489 char pool_name[RTE_MEMPOOL_NAMESIZE];
490 struct rte_mempool *rte_mp = NULL;
493 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
494 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
497 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
498 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
500 #ifdef RTE_LIBRTE_PMD_XENVIRT
501 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
502 (unsigned) mb_mempool_cache,
503 sizeof(struct rte_pktmbuf_pool_private),
504 rte_pktmbuf_pool_init, NULL,
505 rte_pktmbuf_init, NULL,
509 /* if the former Xen allocation failed, fall back to normal allocation */
510 if (rte_mp == NULL) {
512 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
513 mb_size, (unsigned) mb_mempool_cache,
514 sizeof(struct rte_pktmbuf_pool_private),
519 if (rte_mempool_populate_anon(rte_mp) == 0) {
520 rte_mempool_free(rte_mp);
524 rte_pktmbuf_pool_init(rte_mp, NULL);
525 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
527 /* wrapper to rte_mempool_create() */
528 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
529 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
534 if (rte_mp == NULL) {
535 rte_exit(EXIT_FAILURE,
536 "Creation of mbuf pool for socket %u failed: %s\n",
537 socket_id, rte_strerror(rte_errno));
538 } else if (verbose_level > 0) {
539 rte_mempool_dump(stdout, rte_mp);
544 * Check whether the given socket ID is valid in NUMA mode;
545 * return 0 if valid, -1 otherwise.
548 check_socket_id(const unsigned int socket_id)
550 static int warning_once = 0;
552 if (new_socket_id(socket_id)) {
553 if (!warning_once && numa_support)
554 printf("Warning: NUMA should be configured manually by"
555 " using --port-numa-config and"
556 " --ring-numa-config parameters along with"
568 struct rte_port *port;
569 struct rte_mempool *mbp;
570 unsigned int nb_mbuf_per_pool;
572 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
574 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
577 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
578 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
579 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
582 /* Configuration of logical cores. */
583 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
584 sizeof(struct fwd_lcore *) * nb_lcores,
585 RTE_CACHE_LINE_SIZE);
586 if (fwd_lcores == NULL) {
587 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
588 "failed\n", nb_lcores);
590 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
591 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
592 sizeof(struct fwd_lcore),
593 RTE_CACHE_LINE_SIZE);
594 if (fwd_lcores[lc_id] == NULL) {
595 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
598 fwd_lcores[lc_id]->cpuid_idx = lc_id;
601 RTE_ETH_FOREACH_DEV(pid) {
603 rte_eth_dev_info_get(pid, &port->dev_info);
606 if (port_numa[pid] != NUMA_NO_CONFIG)
607 port_per_socket[port_numa[pid]]++;
609 uint32_t socket_id = rte_eth_dev_socket_id(pid);
611 /* if socket_id is invalid, set to 0 */
612 if (check_socket_id(socket_id) < 0)
614 port_per_socket[socket_id]++;
618 /* set flag to initialize port/queue */
619 port->need_reconfig = 1;
620 port->need_reconfig_queues = 1;
624 * Create mbuf pools.
625 * If NUMA support is disabled, create a single mbuf pool in
626 * socket 0 memory by default.
627 * Otherwise, create an mbuf pool in the memory of each socket in use.
629 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
630 * nb_txd can be reconfigured at run time.
632 if (param_total_num_mbufs)
633 nb_mbuf_per_pool = param_total_num_mbufs;
635 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
636 (nb_lcores * mb_mempool_cache) +
637 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
638 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
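/*
 * Rationale (added note) for the default sizing above: each port must be
 * able to keep its RX and TX rings full (RTE_TEST_RX_DESC_MAX +
 * RTE_TEST_TX_DESC_MAX), every forwarding lcore may hold a full mempool
 * cache (nb_lcores * mb_mempool_cache) plus one burst (MAX_PKT_BURST) in
 * flight, and the total is scaled by RTE_MAX_ETHPORTS so any probed port
 * can be served from the pool.
 */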
644 for (i = 0; i < num_sockets; i++)
645 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
648 if (socket_num == UMA_NO_CONFIG)
649 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
651 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
658 * Record which mbuf pool each logical core should use, if needed.
660 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
661 mbp = mbuf_pool_find(
662 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
665 mbp = mbuf_pool_find(0);
666 fwd_lcores[lc_id]->mbp = mbp;
669 /* Configuration of packet forwarding streams. */
670 if (init_fwd_streams() < 0)
671 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
678 reconfig(portid_t new_port_id, unsigned socket_id)
680 struct rte_port *port;
682 /* Reconfiguration of Ethernet ports. */
683 port = &ports[new_port_id];
684 rte_eth_dev_info_get(new_port_id, &port->dev_info);
686 /* set flag to initialize port/queue */
687 port->need_reconfig = 1;
688 port->need_reconfig_queues = 1;
689 port->socket_id = socket_id;
696 init_fwd_streams(void)
699 struct rte_port *port;
700 streamid_t sm_id, nb_fwd_streams_new;
703 /* set the socket ID according to whether NUMA is enabled */
704 RTE_ETH_FOREACH_DEV(pid) {
706 if (nb_rxq > port->dev_info.max_rx_queues) {
707 printf("Fail: nb_rxq(%d) is greater than "
708 "max_rx_queues(%d)\n", nb_rxq,
709 port->dev_info.max_rx_queues);
712 if (nb_txq > port->dev_info.max_tx_queues) {
713 printf("Fail: nb_txq(%d) is greater than "
714 "max_tx_queues(%d)\n", nb_txq,
715 port->dev_info.max_tx_queues);
719 if (port_numa[pid] != NUMA_NO_CONFIG)
720 port->socket_id = port_numa[pid];
722 port->socket_id = rte_eth_dev_socket_id(pid);
724 /* if socket_id is invalid, set to 0 */
725 if (check_socket_id(port->socket_id) < 0)
730 if (socket_num == UMA_NO_CONFIG)
733 port->socket_id = socket_num;
737 q = RTE_MAX(nb_rxq, nb_txq);
739 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
742 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
743 if (nb_fwd_streams_new == nb_fwd_streams)
746 if (fwd_streams != NULL) {
747 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
748 if (fwd_streams[sm_id] == NULL)
750 rte_free(fwd_streams[sm_id]);
751 fwd_streams[sm_id] = NULL;
753 rte_free(fwd_streams);
758 nb_fwd_streams = nb_fwd_streams_new;
759 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
760 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
761 if (fwd_streams == NULL)
762 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
763 "failed\n", nb_fwd_streams);
765 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
766 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
767 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
768 if (fwd_streams[sm_id] == NULL)
769 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
776 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
778 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
780 unsigned int total_burst;
781 unsigned int nb_burst;
782 unsigned int burst_stats[3];
783 uint16_t pktnb_stats[3];
785 int burst_percent[3];
788 * First compute the total number of packet bursts and the
789 * two highest numbers of bursts of the same number of packets.
792 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
793 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
794 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
795 nb_burst = pbs->pkt_burst_spread[nb_pkt];
798 total_burst += nb_burst;
799 if (nb_burst > burst_stats[0]) {
800 burst_stats[1] = burst_stats[0];
801 pktnb_stats[1] = pktnb_stats[0];
802 burst_stats[0] = nb_burst;
803 pktnb_stats[0] = nb_pkt;
806 if (total_burst == 0)
808 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
809 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
810 burst_percent[0], (int) pktnb_stats[0]);
811 if (burst_stats[0] == total_burst) {
815 if (burst_stats[0] + burst_stats[1] == total_burst) {
816 printf(" + %d%% of %d pkts]\n",
817 100 - burst_percent[0], pktnb_stats[1]);
820 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
821 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
822 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
823 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
826 printf(" + %d%% of %d pkts + %d%% of others]\n",
827 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
829 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
832 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
834 struct rte_port *port;
837 static const char *fwd_stats_border = "----------------------";
839 port = &ports[port_id];
840 printf("\n %s Forward statistics for port %-2d %s\n",
841 fwd_stats_border, port_id, fwd_stats_border);
843 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
844 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
846 stats->ipackets, stats->imissed,
847 (uint64_t) (stats->ipackets + stats->imissed));
849 if (cur_fwd_eng == &csum_fwd_engine)
850 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
851 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
852 if ((stats->ierrors + stats->rx_nombuf) > 0) {
853 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
854 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
857 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
859 stats->opackets, port->tx_dropped,
860 (uint64_t) (stats->opackets + port->tx_dropped));
863 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
865 stats->ipackets, stats->imissed,
866 (uint64_t) (stats->ipackets + stats->imissed));
868 if (cur_fwd_eng == &csum_fwd_engine)
869 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
870 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
871 if ((stats->ierrors + stats->rx_nombuf) > 0) {
872 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
873 printf(" RX-nombufs: %14"PRIu64"\n",
877 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
879 stats->opackets, port->tx_dropped,
880 (uint64_t) (stats->opackets + port->tx_dropped));
883 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
885 pkt_burst_stats_display("RX",
886 &port->rx_stream->rx_burst_stats);
888 pkt_burst_stats_display("TX",
889 &port->tx_stream->tx_burst_stats);
892 if (port->rx_queue_stats_mapping_enabled) {
894 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
895 printf(" Stats reg %2d RX-packets:%14"PRIu64
896 " RX-errors:%14"PRIu64
897 " RX-bytes:%14"PRIu64"\n",
898 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
902 if (port->tx_queue_stats_mapping_enabled) {
903 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
904 printf(" Stats reg %2d TX-packets:%14"PRIu64
905 " TX-bytes:%14"PRIu64"\n",
906 i, stats->q_opackets[i], stats->q_obytes[i]);
910 printf(" %s--------------------------------%s\n",
911 fwd_stats_border, fwd_stats_border);
915 fwd_stream_stats_display(streamid_t stream_id)
917 struct fwd_stream *fs;
918 static const char *fwd_top_stats_border = "-------";
920 fs = fwd_streams[stream_id];
921 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
922 (fs->fwd_dropped == 0))
924 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
925 "TX Port=%2d/Queue=%2d %s\n",
926 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
927 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
928 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
929 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
931 /* if checksum mode */
932 if (cur_fwd_eng == &csum_fwd_engine) {
933 printf(" RX - bad IP checksum: %-14u RX - bad L4 checksum: "
934 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
937 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
938 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
939 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
944 flush_fwd_rx_queues(void)
946 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
953 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
954 uint64_t timer_period;
956 /* convert to number of cycles */
957 timer_period = rte_get_timer_hz(); /* 1 second timeout */
959 for (j = 0; j < 2; j++) {
960 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
961 for (rxq = 0; rxq < nb_rxq; rxq++) {
962 port_id = fwd_ports_ids[rxp];
964 * testpmd can get stuck in the do-while loop below
965 * if rte_eth_rx_burst() keeps returning packets,
966 * so a timer is used to exit the loop
967 * after the 1 second timeout expires.
969 prev_tsc = rte_rdtsc();
971 nb_rx = rte_eth_rx_burst(port_id, rxq,
972 pkts_burst, MAX_PKT_BURST);
973 for (i = 0; i < nb_rx; i++)
974 rte_pktmbuf_free(pkts_burst[i]);
976 cur_tsc = rte_rdtsc();
977 diff_tsc = cur_tsc - prev_tsc;
978 timer_tsc += diff_tsc;
979 } while ((nb_rx > 0) &&
980 (timer_tsc < timer_period));
984 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
989 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
991 struct fwd_stream **fsm;
994 #ifdef RTE_LIBRTE_BITRATE
995 uint64_t tics_per_1sec;
997 uint64_t tics_current;
998 uint8_t idx_port, cnt_ports;
1000 cnt_ports = rte_eth_dev_count();
1001 tics_datum = rte_rdtsc();
1002 tics_per_1sec = rte_get_timer_hz();
1004 fsm = &fwd_streams[fc->stream_idx];
1005 nb_fs = fc->stream_nb;
1007 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1008 (*pkt_fwd)(fsm[sm_id]);
1009 #ifdef RTE_LIBRTE_BITRATE
1010 if (bitrate_enabled != 0 &&
1011 bitrate_lcore_id == rte_lcore_id()) {
1012 tics_current = rte_rdtsc();
1013 if (tics_current - tics_datum >= tics_per_1sec) {
1014 /* Periodic bitrate calculation */
1016 idx_port < cnt_ports;
1018 rte_stats_bitrate_calc(bitrate_data,
1020 tics_datum = tics_current;
1024 #ifdef RTE_LIBRTE_LATENCY_STATS
1025 if (latencystats_enabled != 0 &&
1026 latencystats_lcore_id == rte_lcore_id())
1027 rte_latencystats_update();
1030 } while (! fc->stopped);
1034 start_pkt_forward_on_core(void *fwd_arg)
1036 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1037 cur_fwd_config.fwd_eng->packet_fwd);
1042 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1043 * Used to start communication flows in network loopback test configurations.
1046 run_one_txonly_burst_on_core(void *fwd_arg)
1048 struct fwd_lcore *fwd_lc;
1049 struct fwd_lcore tmp_lcore;
1051 fwd_lc = (struct fwd_lcore *) fwd_arg;
1052 tmp_lcore = *fwd_lc;
1053 tmp_lcore.stopped = 1;
1054 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1059 * Launch packet forwarding:
1060 * - Setup per-port forwarding context.
1061 * - launch logical cores with their forwarding configuration.
1064 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1066 port_fwd_begin_t port_fwd_begin;
1071 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1072 if (port_fwd_begin != NULL) {
1073 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1074 (*port_fwd_begin)(fwd_ports_ids[i]);
1076 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1077 lc_id = fwd_lcores_cpuids[i];
1078 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1079 fwd_lcores[i]->stopped = 0;
1080 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1081 fwd_lcores[i], lc_id);
1083 printf("launch lcore %u failed - diag=%d\n",
1090 * Launch packet forwarding configuration.
1093 start_packet_forwarding(int with_tx_first)
1095 port_fwd_begin_t port_fwd_begin;
1096 port_fwd_end_t port_fwd_end;
1097 struct rte_port *port;
1102 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1103 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1105 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1106 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1108 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1109 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1110 (!nb_rxq || !nb_txq))
1111 rte_exit(EXIT_FAILURE,
1112 "Either rxq or txq is 0, cannot use %s fwd mode\n",
1113 cur_fwd_eng->fwd_mode_name);
1115 if (all_ports_started() == 0) {
1116 printf("Not all ports were started\n");
1119 if (test_done == 0) {
1120 printf("Packet forwarding already started\n");
1124 if (init_fwd_streams() < 0) {
1125 printf("Fail from init_fwd_streams()\n");
1130 for (i = 0; i < nb_fwd_ports; i++) {
1131 pt_id = fwd_ports_ids[i];
1132 port = &ports[pt_id];
1133 if (!port->dcb_flag) {
1134 printf("In DCB mode, all forwarding ports must "
1135 "be configured in this mode.\n");
1139 if (nb_fwd_lcores == 1) {
1140 printf("In DCB mode, the number of forwarding cores "
1141 "should be larger than 1.\n");
1148 flush_fwd_rx_queues();
1151 pkt_fwd_config_display(&cur_fwd_config);
1152 rxtx_config_display();
1154 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1155 pt_id = fwd_ports_ids[i];
1156 port = &ports[pt_id];
1157 rte_eth_stats_get(pt_id, &port->stats);
1158 port->tx_dropped = 0;
1160 map_port_queue_stats_mapping_registers(pt_id, port);
1162 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1163 fwd_streams[sm_id]->rx_packets = 0;
1164 fwd_streams[sm_id]->tx_packets = 0;
1165 fwd_streams[sm_id]->fwd_dropped = 0;
1166 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1167 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1169 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1170 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1171 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1172 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1173 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1175 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1176 fwd_streams[sm_id]->core_cycles = 0;
1179 if (with_tx_first) {
1180 port_fwd_begin = tx_only_engine.port_fwd_begin;
1181 if (port_fwd_begin != NULL) {
1182 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1183 (*port_fwd_begin)(fwd_ports_ids[i]);
1185 while (with_tx_first--) {
1186 launch_packet_forwarding(
1187 run_one_txonly_burst_on_core);
1188 rte_eal_mp_wait_lcore();
1190 port_fwd_end = tx_only_engine.port_fwd_end;
1191 if (port_fwd_end != NULL) {
1192 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1193 (*port_fwd_end)(fwd_ports_ids[i]);
1196 launch_packet_forwarding(start_pkt_forward_on_core);
1200 stop_packet_forwarding(void)
1202 struct rte_eth_stats stats;
1203 struct rte_port *port;
1204 port_fwd_end_t port_fwd_end;
1209 uint64_t total_recv;
1210 uint64_t total_xmit;
1211 uint64_t total_rx_dropped;
1212 uint64_t total_tx_dropped;
1213 uint64_t total_rx_nombuf;
1214 uint64_t tx_dropped;
1215 uint64_t rx_bad_ip_csum;
1216 uint64_t rx_bad_l4_csum;
1217 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1218 uint64_t fwd_cycles;
1220 static const char *acc_stats_border = "+++++++++++++++";
1223 printf("Packet forwarding not started\n");
1226 printf("Telling cores to stop...");
1227 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1228 fwd_lcores[lc_id]->stopped = 1;
1229 printf("\nWaiting for lcores to finish...\n");
1230 rte_eal_mp_wait_lcore();
1231 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1232 if (port_fwd_end != NULL) {
1233 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1234 pt_id = fwd_ports_ids[i];
1235 (*port_fwd_end)(pt_id);
1238 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1241 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1242 if (cur_fwd_config.nb_fwd_streams >
1243 cur_fwd_config.nb_fwd_ports) {
1244 fwd_stream_stats_display(sm_id);
1245 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1246 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1248 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1250 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1253 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1254 tx_dropped = (uint64_t) (tx_dropped +
1255 fwd_streams[sm_id]->fwd_dropped);
1256 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1259 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1260 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1261 fwd_streams[sm_id]->rx_bad_ip_csum);
1262 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1266 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1267 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1268 fwd_streams[sm_id]->rx_bad_l4_csum);
1269 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1272 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1273 fwd_cycles = (uint64_t) (fwd_cycles +
1274 fwd_streams[sm_id]->core_cycles);
1279 total_rx_dropped = 0;
1280 total_tx_dropped = 0;
1281 total_rx_nombuf = 0;
1282 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1283 pt_id = fwd_ports_ids[i];
1285 port = &ports[pt_id];
1286 rte_eth_stats_get(pt_id, &stats);
1287 stats.ipackets -= port->stats.ipackets;
1288 port->stats.ipackets = 0;
1289 stats.opackets -= port->stats.opackets;
1290 port->stats.opackets = 0;
1291 stats.ibytes -= port->stats.ibytes;
1292 port->stats.ibytes = 0;
1293 stats.obytes -= port->stats.obytes;
1294 port->stats.obytes = 0;
1295 stats.imissed -= port->stats.imissed;
1296 port->stats.imissed = 0;
1297 stats.oerrors -= port->stats.oerrors;
1298 port->stats.oerrors = 0;
1299 stats.rx_nombuf -= port->stats.rx_nombuf;
1300 port->stats.rx_nombuf = 0;
1302 total_recv += stats.ipackets;
1303 total_xmit += stats.opackets;
1304 total_rx_dropped += stats.imissed;
1305 total_tx_dropped += port->tx_dropped;
1306 total_rx_nombuf += stats.rx_nombuf;
1308 fwd_port_stats_display(pt_id, &stats);
1310 printf("\n %s Accumulated forward statistics for all ports"
1312 acc_stats_border, acc_stats_border);
1313 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1315 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1317 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1318 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1319 if (total_rx_nombuf > 0)
1320 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1321 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1323 acc_stats_border, acc_stats_border);
1324 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1326 printf("\n CPU cycles/packet=%u (total cycles="
1327 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1328 (unsigned int)(fwd_cycles / total_recv),
1329 fwd_cycles, total_recv);
1331 printf("\nDone.\n");
1336 dev_set_link_up(portid_t pid)
1338 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1339 printf("\nSet link up fail.\n");
1343 dev_set_link_down(portid_t pid)
1345 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1346 printf("\nSet link down fail.\n");
1350 all_ports_started(void)
1353 struct rte_port *port;
1355 RTE_ETH_FOREACH_DEV(pi) {
1357 /* Check if there is a port which is not started */
1358 if ((port->port_status != RTE_PORT_STARTED) &&
1359 (port->slave_flag == 0))
1363 /* all ports (excluding bonding slaves) are started */
1368 all_ports_stopped(void)
1371 struct rte_port *port;
1373 RTE_ETH_FOREACH_DEV(pi) {
1375 if ((port->port_status != RTE_PORT_STOPPED) &&
1376 (port->slave_flag == 0))
1384 port_is_started(portid_t port_id)
1386 if (port_id_is_invalid(port_id, ENABLED_WARN))
1389 if (ports[port_id].port_status != RTE_PORT_STARTED)
1396 port_is_closed(portid_t port_id)
1398 if (port_id_is_invalid(port_id, ENABLED_WARN))
1401 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1408 start_port(portid_t pid)
1410 int diag, need_check_link_status = -1;
1413 struct rte_port *port;
1414 struct ether_addr mac_addr;
1415 enum rte_eth_event_type event_type;
1417 if (port_id_is_invalid(pid, ENABLED_WARN))
1422 RTE_ETH_FOREACH_DEV(pi) {
1423 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1426 need_check_link_status = 0;
1428 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1429 RTE_PORT_HANDLING) == 0) {
1430 printf("Port %d is not in stopped state\n", pi);
1434 if (port->need_reconfig > 0) {
1435 port->need_reconfig = 0;
1437 if (flow_isolate_all) {
1438 int ret = port_flow_isolate(pi, 1);
1440 printf("Failed to apply isolated"
1441 " mode on port %d\n", pi);
1446 printf("Configuring Port %d (socket %u)\n", pi,
1448 /* configure port */
1449 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1452 if (rte_atomic16_cmpset(&(port->port_status),
1453 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1454 printf("Port %d can not be set back "
1455 "to stopped\n", pi);
1456 printf("Fail to configure port %d\n", pi);
1457 /* try to reconfigure port next time */
1458 port->need_reconfig = 1;
1462 if (port->need_reconfig_queues > 0) {
1463 port->need_reconfig_queues = 0;
1464 /* setup tx queues */
1465 for (qi = 0; qi < nb_txq; qi++) {
1466 if ((numa_support) &&
1467 (txring_numa[pi] != NUMA_NO_CONFIG))
1468 diag = rte_eth_tx_queue_setup(pi, qi,
1469 nb_txd,txring_numa[pi],
1472 diag = rte_eth_tx_queue_setup(pi, qi,
1473 nb_txd,port->socket_id,
1479 /* Fail to setup tx queue, return */
1480 if (rte_atomic16_cmpset(&(port->port_status),
1482 RTE_PORT_STOPPED) == 0)
1483 printf("Port %d can not be set back "
1484 "to stopped\n", pi);
1485 printf("Fail to configure port %d tx queues\n", pi);
1486 /* try to reconfigure queues next time */
1487 port->need_reconfig_queues = 1;
1490 /* setup rx queues */
1491 for (qi = 0; qi < nb_rxq; qi++) {
1492 if ((numa_support) &&
1493 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1494 struct rte_mempool * mp =
1495 mbuf_pool_find(rxring_numa[pi]);
1497 printf("Failed to setup RX queue: "
1498 "no mempool allocated"
1499 " on socket %d\n",
1504 diag = rte_eth_rx_queue_setup(pi, qi,
1505 nb_rxd,rxring_numa[pi],
1506 &(port->rx_conf),mp);
1508 struct rte_mempool *mp =
1509 mbuf_pool_find(port->socket_id);
1511 printf("Failed to setup RX queue: "
1512 "no mempool allocated"
1513 " on socket %d\n",
1517 diag = rte_eth_rx_queue_setup(pi, qi,
1518 nb_rxd,port->socket_id,
1519 &(port->rx_conf), mp);
1524 /* Fail to setup rx queue, return */
1525 if (rte_atomic16_cmpset(&(port->port_status),
1527 RTE_PORT_STOPPED) == 0)
1528 printf("Port %d can not be set back "
1529 "to stopped\n", pi);
1530 printf("Fail to configure port %d rx queues\n", pi);
1531 /* try to reconfigure queues next time */
1532 port->need_reconfig_queues = 1;
1537 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1538 event_type < RTE_ETH_EVENT_MAX;
1540 diag = rte_eth_dev_callback_register(pi,
1545 printf("Failed to setup event callback for event %d\n",
1552 if (rte_eth_dev_start(pi) < 0) {
1553 printf("Fail to start port %d\n", pi);
1555 /* Failed to start the port; set it back to stopped */
1556 if (rte_atomic16_cmpset(&(port->port_status),
1557 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1558 printf("Port %d can not be set back to "
1563 if (rte_atomic16_cmpset(&(port->port_status),
1564 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1565 printf("Port %d can not be set into started\n", pi);
1567 rte_eth_macaddr_get(pi, &mac_addr);
1568 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1569 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1570 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1571 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1573 /* at least one port was started; the link status needs to be checked */
1574 need_check_link_status = 1;
1577 if (need_check_link_status == 1 && !no_link_check)
1578 check_all_ports_link_status(RTE_PORT_ALL);
1579 else if (need_check_link_status == 0)
1580 printf("Please stop the ports first\n");
1587 stop_port(portid_t pid)
1590 struct rte_port *port;
1591 int need_check_link_status = 0;
1598 if (port_id_is_invalid(pid, ENABLED_WARN))
1601 printf("Stopping ports...\n");
1603 RTE_ETH_FOREACH_DEV(pi) {
1604 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1607 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1608 printf("Please remove port %d from forwarding configuration.\n", pi);
1612 if (port_is_bonding_slave(pi)) {
1613 printf("Please remove port %d from bonded device.\n", pi);
1618 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1619 RTE_PORT_HANDLING) == 0)
1622 rte_eth_dev_stop(pi);
1624 if (rte_atomic16_cmpset(&(port->port_status),
1625 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1626 printf("Port %d can not be set into stopped\n", pi);
1627 need_check_link_status = 1;
1629 if (need_check_link_status && !no_link_check)
1630 check_all_ports_link_status(RTE_PORT_ALL);
1636 close_port(portid_t pid)
1639 struct rte_port *port;
1641 if (port_id_is_invalid(pid, ENABLED_WARN))
1644 printf("Closing ports...\n");
1646 RTE_ETH_FOREACH_DEV(pi) {
1647 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1650 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1651 printf("Please remove port %d from forwarding configuration.\n", pi);
1655 if (port_is_bonding_slave(pi)) {
1656 printf("Please remove port %d from bonded device.\n", pi);
1661 if (rte_atomic16_cmpset(&(port->port_status),
1662 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1663 printf("Port %d is already closed\n", pi);
1667 if (rte_atomic16_cmpset(&(port->port_status),
1668 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1669 printf("Port %d is not in stopped state\n", pi);
1673 if (port->flow_list)
1674 port_flow_flush(pi);
1675 rte_eth_dev_close(pi);
1677 if (rte_atomic16_cmpset(&(port->port_status),
1678 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1679 printf("Port %d cannot be set to closed\n", pi);
1686 attach_port(char *identifier)
1689 unsigned int socket_id;
1691 printf("Attaching a new port...\n");
1693 if (identifier == NULL) {
1694 printf("Invalid parameters are specified\n");
1698 if (rte_eth_dev_attach(identifier, &pi))
1701 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1702 /* if socket_id is invalid, set to 0 */
1703 if (check_socket_id(socket_id) < 0)
1705 reconfig(pi, socket_id);
1706 rte_eth_promiscuous_enable(pi);
1708 nb_ports = rte_eth_dev_count();
1710 ports[pi].port_status = RTE_PORT_STOPPED;
1712 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1717 detach_port(uint8_t port_id)
1719 char name[RTE_ETH_NAME_MAX_LEN];
1721 printf("Detaching a port...\n");
1723 if (!port_is_closed(port_id)) {
1724 printf("Please close port first\n");
1728 if (ports[port_id].flow_list)
1729 port_flow_flush(port_id);
1731 if (rte_eth_dev_detach(port_id, name)) {
1732 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1736 nb_ports = rte_eth_dev_count();
1738 printf("Port '%s' is detached. Now total ports is %d\n",
1750 stop_packet_forwarding();
1752 if (ports != NULL) {
1754 RTE_ETH_FOREACH_DEV(pt_id) {
1755 printf("\nShutting down port %d...\n", pt_id);
1761 printf("\nBye...\n");
1764 typedef void (*cmd_func_t)(void);
1765 struct pmd_test_command {
1766 const char *cmd_name;
1767 cmd_func_t cmd_func;
1770 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1772 /* Check the link status of all ports for up to 9 s, and print the final status */
1774 check_all_ports_link_status(uint32_t port_mask)
1776 #define CHECK_INTERVAL 100 /* 100ms */
1777 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1778 uint8_t portid, count, all_ports_up, print_flag = 0;
1779 struct rte_eth_link link;
1781 printf("Checking link statuses...\n");
1783 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1785 RTE_ETH_FOREACH_DEV(portid) {
1786 if ((port_mask & (1 << portid)) == 0)
1788 memset(&link, 0, sizeof(link));
1789 rte_eth_link_get_nowait(portid, &link);
1790 /* print link status if flag set */
1791 if (print_flag == 1) {
1792 if (link.link_status)
1793 printf("Port %d Link Up - speed %u "
1794 "Mbps - %s\n", (uint8_t)portid,
1795 (unsigned)link.link_speed,
1796 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1797 ("full-duplex") : ("half-duplex"));
1799 printf("Port %d Link Down\n",
1803 /* clear all_ports_up flag if any link down */
1804 if (link.link_status == ETH_LINK_DOWN) {
1809 /* after finally printing all link status, get out */
1810 if (print_flag == 1)
1813 if (all_ports_up == 0) {
1815 rte_delay_ms(CHECK_INTERVAL);
1818 /* set print_flag if all ports are up or the timeout expired */
1819 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1829 rmv_event_callback(void *arg)
1831 struct rte_eth_dev *dev;
1832 uint8_t port_id = (intptr_t)arg;
1834 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1835 dev = &rte_eth_devices[port_id];
1838 close_port(port_id);
1839 printf("removing device %s\n", dev->device->name);
1840 if (rte_eal_dev_detach(dev->device))
1841 RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
1845 /* This function is used by the interrupt thread */
1847 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
1850 static const char * const event_desc[] = {
1851 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1852 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1853 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1854 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1855 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1856 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1857 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1858 [RTE_ETH_EVENT_MAX] = NULL,
1861 RTE_SET_USED(param);
1862 RTE_SET_USED(ret_param);
1864 if (type >= RTE_ETH_EVENT_MAX) {
1865 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1866 port_id, __func__, type);
1868 } else if (event_print_mask & (UINT32_C(1) << type)) {
1869 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1875 case RTE_ETH_EVENT_INTR_RMV:
1876 if (rte_eal_alarm_set(100000,
1877 rmv_event_callback, (void *)(intptr_t)port_id))
1878 fprintf(stderr, "Could not set up deferred device removal\n");
1887 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1891 uint8_t mapping_found = 0;
1893 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1894 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1895 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1896 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1897 tx_queue_stats_mappings[i].queue_id,
1898 tx_queue_stats_mappings[i].stats_counter_id);
1905 port->tx_queue_stats_mapping_enabled = 1;
1910 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1914 uint8_t mapping_found = 0;
1916 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1917 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1918 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1919 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1920 rx_queue_stats_mappings[i].queue_id,
1921 rx_queue_stats_mappings[i].stats_counter_id);
1928 port->rx_queue_stats_mapping_enabled = 1;
1933 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1937 diag = set_tx_queue_stats_mapping_registers(pi, port);
1939 if (diag == -ENOTSUP) {
1940 port->tx_queue_stats_mapping_enabled = 0;
1941 printf("TX queue stats mapping not supported on port id=%d\n", pi);
1944 rte_exit(EXIT_FAILURE,
1945 "set_tx_queue_stats_mapping_registers "
1946 "failed for port id=%d diag=%d\n",
1950 diag = set_rx_queue_stats_mapping_registers(pi, port);
1952 if (diag == -ENOTSUP) {
1953 port->rx_queue_stats_mapping_enabled = 0;
1954 printf("RX queue stats mapping not supported on port id=%d\n", pi);
1957 rte_exit(EXIT_FAILURE,
1958 "set_rx_queue_stats_mapping_registers "
1959 "failed for port id=%d diag=%d\n",
1965 rxtx_port_config(struct rte_port *port)
1967 port->rx_conf = port->dev_info.default_rxconf;
1968 port->tx_conf = port->dev_info.default_txconf;
1970 /* Check if any RX/TX parameters have been passed */
1971 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1972 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1974 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1975 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1977 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1978 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1980 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1981 port->rx_conf.rx_free_thresh = rx_free_thresh;
1983 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1984 port->rx_conf.rx_drop_en = rx_drop_en;
1986 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1987 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1989 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1990 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1992 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1993 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1995 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1996 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1998 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1999 port->tx_conf.tx_free_thresh = tx_free_thresh;
2001 if (txq_flags != RTE_PMD_PARAM_UNSET)
2002 port->tx_conf.txq_flags = txq_flags;
2006 init_port_config(void)
2009 struct rte_port *port;
2011 RTE_ETH_FOREACH_DEV(pid) {
2013 port->dev_conf.rxmode = rx_mode;
2014 port->dev_conf.fdir_conf = fdir_conf;
2016 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2017 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2019 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2020 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2023 if (port->dcb_flag == 0) {
2024 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2025 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2027 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2030 rxtx_port_config(port);
2032 rte_eth_macaddr_get(pid, &port->eth_addr);
2034 map_port_queue_stats_mapping_registers(pid, port);
2035 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2036 rte_pmd_ixgbe_bypass_init(pid);
2039 if (lsc_interrupt &&
2040 (rte_eth_devices[pid].data->dev_flags &
2041 RTE_ETH_DEV_INTR_LSC))
2042 port->dev_conf.intr_conf.lsc = 1;
2043 if (rmv_interrupt &&
2044 (rte_eth_devices[pid].data->dev_flags &
2045 RTE_ETH_DEV_INTR_RMV))
2046 port->dev_conf.intr_conf.rmv = 1;
2050 void set_port_slave_flag(portid_t slave_pid)
2052 struct rte_port *port;
2054 port = &ports[slave_pid];
2055 port->slave_flag = 1;
2058 void clear_port_slave_flag(portid_t slave_pid)
2060 struct rte_port *port;
2062 port = &ports[slave_pid];
2063 port->slave_flag = 0;
2066 uint8_t port_is_bonding_slave(portid_t slave_pid)
2068 struct rte_port *port;
2070 port = &ports[slave_pid];
2071 return port->slave_flag;
2074 const uint16_t vlan_tags[] = {
2075 0, 1, 2, 3, 4, 5, 6, 7,
2076 8, 9, 10, 11, 12, 13, 14, 15,
2077 16, 17, 18, 19, 20, 21, 22, 23,
2078 24, 25, 26, 27, 28, 29, 30, 31
2082 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2083 enum dcb_mode_enable dcb_mode,
2084 enum rte_eth_nb_tcs num_tcs,
2090 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2091 * given above, and the number of traffic classes available for use.
2093 if (dcb_mode == DCB_VT_ENABLED) {
2094 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2095 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2096 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2097 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2099 /* VMDQ+DCB RX and TX configurations */
2100 vmdq_rx_conf->enable_default_pool = 0;
2101 vmdq_rx_conf->default_pool = 0;
2102 vmdq_rx_conf->nb_queue_pools =
2103 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2104 vmdq_tx_conf->nb_queue_pools =
2105 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2107 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2108 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2109 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2110 vmdq_rx_conf->pool_map[i].pools =
2111 1 << (i % vmdq_rx_conf->nb_queue_pools);
2113 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2114 vmdq_rx_conf->dcb_tc[i] = i;
2115 vmdq_tx_conf->dcb_tc[i] = i;
2118 /* set DCB mode of RX and TX of multiple queues */
2119 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2120 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2122 struct rte_eth_dcb_rx_conf *rx_conf =
2123 &eth_conf->rx_adv_conf.dcb_rx_conf;
2124 struct rte_eth_dcb_tx_conf *tx_conf =
2125 &eth_conf->tx_adv_conf.dcb_tx_conf;
2127 rx_conf->nb_tcs = num_tcs;
2128 tx_conf->nb_tcs = num_tcs;
2130 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2131 rx_conf->dcb_tc[i] = i % num_tcs;
2132 tx_conf->dcb_tc[i] = i % num_tcs;
2134 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2135 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2136 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2140 eth_conf->dcb_capability_en =
2141 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2143 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2149 init_port_dcb_config(portid_t pid,
2150 enum dcb_mode_enable dcb_mode,
2151 enum rte_eth_nb_tcs num_tcs,
2154 struct rte_eth_conf port_conf;
2155 struct rte_port *rte_port;
2159 rte_port = &ports[pid];
2161 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2162 /* Enter DCB configuration status */
2165 /* set the DCB configuration for VT mode and non-VT mode */
2166 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2169 port_conf.rxmode.hw_vlan_filter = 1;
2172 * Write the configuration into the device.
2173 * Set the numbers of RX & TX queues to 0, so
2174 * the RX & TX queues will not be setup.
2176 (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2178 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2180 /* If dev_info.vmdq_pool_base is greater than 0,
2181 * the queue IDs of the VMDq pools start after the PF queues.
2183 if (dcb_mode == DCB_VT_ENABLED &&
2184 rte_port->dev_info.vmdq_pool_base > 0) {
2185 printf("VMDQ_DCB multi-queue mode is nonsensical"
2186 " for port %d.", pid);
2190 /* Assume the ports in testpmd have the same DCB capability
2191 * and the same number of rxq and txq in DCB mode
2193 if (dcb_mode == DCB_VT_ENABLED) {
2194 if (rte_port->dev_info.max_vfs > 0) {
2195 nb_rxq = rte_port->dev_info.nb_rx_queues;
2196 nb_txq = rte_port->dev_info.nb_tx_queues;
2198 nb_rxq = rte_port->dev_info.max_rx_queues;
2199 nb_txq = rte_port->dev_info.max_tx_queues;
2202 /* if VT is disabled, use all PF queues */
2203 if (rte_port->dev_info.vmdq_pool_base == 0) {
2204 nb_rxq = rte_port->dev_info.max_rx_queues;
2205 nb_txq = rte_port->dev_info.max_tx_queues;
2207 nb_rxq = (queueid_t)num_tcs;
2208 nb_txq = (queueid_t)num_tcs;
2212 rx_free_thresh = 64;
2214 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2216 rxtx_port_config(rte_port);
2218 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2219 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2220 rx_vft_set(pid, vlan_tags[i], 1);
2222 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2223 map_port_queue_stats_mapping_registers(pid, rte_port);
2225 rte_port->dcb_flag = 1;
2233 /* Configuration of Ethernet ports. */
2234 ports = rte_zmalloc("testpmd: ports",
2235 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2236 RTE_CACHE_LINE_SIZE);
2237 if (ports == NULL) {
2238 rte_exit(EXIT_FAILURE,
2239 "rte_zmalloc(%d struct rte_port) failed\n",
2255 const char clr[] = { 27, '[', '2', 'J', '\0' };
2256 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2258 /* Clear screen and move to top left */
2259 printf("%s%s", clr, top_left);
2261 printf("\nPort statistics ====================================");
2262 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2263 nic_stats_display(fwd_ports_ids[i]);
2267 signal_handler(int signum)
2269 if (signum == SIGINT || signum == SIGTERM) {
2270 printf("\nSignal %d received, preparing to exit...\n",
2272 #ifdef RTE_LIBRTE_PDUMP
2273 /* uninitialize packet capture framework */
2276 #ifdef RTE_LIBRTE_LATENCY_STATS
2277 rte_latencystats_uninit();
2280 /* exit with the expected status */
2281 signal(signum, SIG_DFL);
2282 kill(getpid(), signum);
2287 main(int argc, char** argv)
2292 signal(SIGINT, signal_handler);
2293 signal(SIGTERM, signal_handler);
2295 diag = rte_eal_init(argc, argv);
2297 rte_panic("Cannot init EAL\n");
2299 #ifdef RTE_LIBRTE_PDUMP
2300 /* initialize packet capture framework */
2301 rte_pdump_init(NULL);
2304 nb_ports = (portid_t) rte_eth_dev_count();
2306 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2308 /* allocate port structures, and init them */
2311 set_def_fwd_config();
2313 rte_panic("Empty set of forwarding logical cores - check the "
2314 "core mask supplied in the command parameters\n");
2316 /* Bitrate/latency stats disabled by default */
2317 #ifdef RTE_LIBRTE_BITRATE
2318 bitrate_enabled = 0;
2320 #ifdef RTE_LIBRTE_LATENCY_STATS
2321 latencystats_enabled = 0;
2327 launch_args_parse(argc, argv);
2329 if (tx_first && interactive)
2330 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2331 "interactive mode.\n");
2333 if (tx_first && lsc_interrupt) {
2334 printf("Warning: lsc_interrupt needs to be off when "
2335 "using tx_first. Disabling.\n");
2339 if (!nb_rxq && !nb_txq)
2340 printf("Warning: Either rx or tx queues should be non-zero\n");
2342 if (nb_rxq > 1 && nb_rxq > nb_txq)
2343 printf("Warning: nb_rxq=%d enables RSS configuration, "
2344 "but nb_txq=%d will prevent fully testing it.\n",
2348 if (start_port(RTE_PORT_ALL) != 0)
2349 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2351 /* set all ports to promiscuous mode by default */
2352 RTE_ETH_FOREACH_DEV(port_id)
2353 rte_eth_promiscuous_enable(port_id);
2355 /* Init metrics library */
2356 rte_metrics_init(rte_socket_id());
2358 #ifdef RTE_LIBRTE_LATENCY_STATS
2359 if (latencystats_enabled != 0) {
2360 int ret = rte_latencystats_init(1, NULL);
2362 printf("Warning: latencystats init()"
2363 " returned error %d\n", ret);
2364 printf("Latencystats running on lcore %d\n",
2365 latencystats_lcore_id);
2369 /* Setup bitrate stats */
2370 #ifdef RTE_LIBRTE_BITRATE
2371 if (bitrate_enabled != 0) {
2372 bitrate_data = rte_stats_bitrate_create();
2373 if (bitrate_data == NULL)
2374 rte_exit(EXIT_FAILURE,
2375 "Could not allocate bitrate data.\n");
2376 rte_stats_bitrate_reg(bitrate_data);
2380 #ifdef RTE_LIBRTE_CMDLINE
2381 if (strlen(cmdline_filename) != 0)
2382 cmdline_read_from_file(cmdline_filename);
2384 if (interactive == 1) {
2386 printf("Start automatic packet forwarding\n");
2387 start_packet_forwarding(0);
2397 printf("No commandline core given, start packet forwarding\n");
2398 start_packet_forwarding(tx_first);
2399 if (stats_period != 0) {
2400 uint64_t prev_time = 0, cur_time, diff_time = 0;
2401 uint64_t timer_period;
2403 /* Convert to number of cycles */
2404 timer_period = stats_period * rte_get_timer_hz();
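/*
 * Example: with a statistics period of 5 seconds (--stats-period 5, if
 * supported by this version's option parser), timer_period becomes
 * 5 * rte_get_timer_hz() TSC cycles, so the loop below refreshes the
 * display every 5 seconds.
 */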
2407 cur_time = rte_get_timer_cycles();
2408 diff_time += cur_time - prev_time;
2410 if (diff_time >= timer_period) {
2412 /* Reset the timer */
2415 /* Sleep to avoid unnecessary checks */
2416 prev_time = cur_time;
2421 printf("Press enter to exit\n");
2422 rc = read(0, &c, 1);