/*
 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <signal.h>
#include <unistd.h>
#include <limits.h>
#include <errno.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
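/*
 * Illustrative example (editorial, not from the original source): with
 * nb_ports = 2 and nb_rxq = nb_txq = 4, init_fwd_streams() below allocates
 * nb_ports * RTE_MAX(nb_rxq, nb_txq) = 8 fwd_stream structures.
 */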
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */
/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
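/*
 * Note (editorial): these defaults can be overridden from the command line,
 * e.g. "testpmd ... --rxd=512 --txd=512"; the values are applied per queue
 * when the queues are set up in start_port().
 */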
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping ports.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
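/*
 * Example (illustrative): an event type is printed only when its bit is set
 * in the mask above, i.e. when
 *
 *	event_print_mask & (UINT32_C(1) << type)
 *
 * is non-zero. With the default mask, RTE_ETH_EVENT_VF_MBOX is the one
 * event type left muted.
 */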
/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS
/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;
#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif
struct gro_status gro_ports[RTE_MAX_ETHPORTS];

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
/*
 * Helper function to check whether a socket id is new (not yet discovered).
 * If new, return a positive value; otherwise return zero.
 */
static int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
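/*
 * Usage sketch (editorial assumption, mirroring the calls made from
 * init_config() below): one pool per detected NUMA socket when numa_support
 * is set,
 *
 *	for (i = 0; i < num_sockets; i++)
 *		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
 *				 socket_ids[i]);
 *
 * otherwise a single pool on socket 0 (or on --socket-num).
 */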
/*
 * Check whether a given socket id is valid in NUMA mode;
 * if valid, return 0, else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];

		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}
	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
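	/*
	 * Worked example (editorial, assuming the defaults from testpmd.h:
	 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512, mb_mempool_cache = 250): with 4 forwarding
	 * lcores this yields 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs,
	 * then scaled by RTE_MAX_ETHPORTS.
	 */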
	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}
	init_port_config();

	/*
	 * Record which mbuf pool each logical core should use, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;
	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i],
			       stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "    TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer is added to
				 * exit the loop after the 1 s timer expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}
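/*
 * Note (editorial): the queues are swept twice (j < 2) with a 10 ms pause
 * in between, so packets still in flight when the first sweep completes
 * are drained by the second sweep.
 */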
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
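/*
 * Note (editorial): copying the lcore context with stopped = 1 makes the
 * do-while loop in run_pkt_fwd_on_lcore() execute exactly one iteration,
 * i.e. a single burst is sent and the function returns.
 */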
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
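	/*
	 * Note (editorial): the tx_first bursts above only prime loopback
	 * topologies; regular forwarding with the configured engine is
	 * launched below.
	 */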
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
			(port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		for (event_type = RTE_ETH_EVENT_UNKNOWN;
		     event_type < RTE_ETH_EVENT_MAX;
		     event_type++) {
			diag = rte_eth_dev_callback_register(pi,
							event_type,
							eth_event_callback,
							NULL);
			if (diag) {
				printf("Failed to setup event callback for event %d\n",
					event_type);
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
		}
	}

	printf("Done\n");
}
void
attach_port(char *identifier)
{
	uint8_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name)) {
		RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
		return;
	}

	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
	       name, nb_ports);
	printf("Done\n");
}
void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9 s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
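	/*
	 * Note (editorial): the loop below polls every CHECK_INTERVAL ms for
	 * at most MAX_CHECK_TIME iterations (90 * 100 ms = 9 s) and prints
	 * the final status once all ports report up or the timeout expires.
	 */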
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u Mbps - %s\n",
						portid, link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}
static void
rmv_event_callback(void *arg)
{
	struct rte_eth_dev *dev;
	uint8_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	stop_port(port_id);
	close_port(port_id);
	printf("removing device %s\n", dev->device->name);
	if (rte_eal_dev_detach(dev->device))
		RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
			dev->device->name);
}
/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		   void *ret_param)
{
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu8 ": %s event\n", port_id,
			event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
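		/*
		 * Note (editorial): removal is deferred to an alarm callback
		 * (100 ms, see rmv_event_callback() above) because this
		 * handler runs in the interrupt thread, where closing and
		 * detaching the port directly is not safe; the rationale is
		 * inferred from the deferred-alarm pattern used here.
		 */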
		break;
	default:
		break;
	}
	return 0;
}

static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}
static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}

const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Build up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
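		/*
		 * Example of the resulting mapping (illustrative): with
		 * num_tcs == ETH_8_TCS there are 16 pools, so VLAN tags 0
		 * and 16 both map to pool 0 (pools = 1 << (i % 16)); with
		 * ETH_4_TCS all 32 vlan_tags entries map to distinct pools.
		 */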
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue ids of the VMDq pools start after the PF queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
}
static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
		RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);
	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
			"using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
			"but nb_txq=%d will prevent fully testing it.\n",
			nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());
#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif
#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();
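			/*
			 * Example (illustrative): stats_period = 5 on a
			 * 2 GHz TSC gives timer_period = 10^10 cycles,
			 * i.e. the statistics are redrawn every 5 seconds.
			 */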
			while (1) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}