4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
/*
 * NOTE(review): this file is a mangled extraction of DPDK app/test-pmd/
 * testpmd.c (c. 17.05).  Every line still carries its original source line
 * number as a stray leading token, and many original lines are missing
 * (the embedded numbering skips), so this text does not compile as-is.
 * Only comments are added below; every code token is left byte-identical.
 *
 * This region holds testpmd's global run-time configuration state:
 * verbosity, NUMA/socket settings, probed port and lcore tables, the
 * forwarding configuration, mbuf-pool sizing, RX/TX queue, descriptor and
 * threshold defaults (RTE_PMD_PARAM_UNSET means "leave to the PMD"),
 * the default rx_mode / flow-director configuration, per-queue stats
 * mapping tables, and the forward declarations used later in the file.
 */
93 uint16_t verbose_level = 0; /**< Silent by default. */
95 /* use master core for command line ? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 char cmdline_filename[PATH_MAX] = {0};
101 * NUMA support configuration.
102 * When set, the NUMA support attempts to dispatch the allocation of the
103 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
104 * probed ports among the CPU sockets 0 and 1.
105 * Otherwise, all memory is allocated from CPU socket 0.
107 uint8_t numa_support = 1; /**< numa enabled by default */
110 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
113 uint8_t socket_num = UMA_NO_CONFIG;
116 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
121 * Record the Ethernet address of peer target ports to which packets are
123 * Must be instantiated with the ethernet addresses of peer traffic generator
126 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
127 portid_t nb_peer_eth_addrs = 0;
130 * Probed Target Environment.
132 struct rte_port *ports; /**< For all probed ethernet ports. */
133 portid_t nb_ports; /**< Number of probed ethernet ports. */
134 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
135 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
138 * Test Forwarding Configuration.
139 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
140 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
142 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
143 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
144 portid_t nb_cfg_ports; /**< Number of configured ports. */
145 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
147 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
148 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
150 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
151 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
/* Forwarding-engine table; NOTE(review): most engine entries are missing
 * from this extraction — only the IEEE1588 entry survived. */
154 * Forwarding engines.
156 struct fwd_engine * fwd_engines[] = {
165 #ifdef RTE_LIBRTE_IEEE1588
166 &ieee1588_fwd_engine,
171 struct fwd_config cur_fwd_config;
172 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
173 uint32_t retry_enabled;
174 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
175 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
177 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
178 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
179 * specified on command-line. */
182 * Configuration of packet segments used by the "txonly" processing engine.
184 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
185 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
186 TXONLY_DEF_PACKET_LEN,
188 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
190 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
191 /**< Split policy for packets to TX. */
193 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
194 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
196 /* current configuration is in DCB or not,0 means it is not in DCB mode */
197 uint8_t dcb_config = 0;
199 /* Whether the dcb is in testing status */
200 uint8_t dcb_test = 0;
203 * Configurable number of RX/TX queues.
205 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
206 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
209 * Configurable number of RX/TX ring descriptors.
211 #define RTE_TEST_RX_DESC_DEFAULT 128
212 #define RTE_TEST_TX_DESC_DEFAULT 512
213 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
214 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
216 #define RTE_PMD_PARAM_UNSET -1
218 * Configurable values of RX and TX ring threshold registers.
221 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
223 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
225 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
230 * Configurable value of RX free threshold.
232 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
235 * Configurable value of RX drop enable.
237 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
240 * Configurable value of TX free threshold.
242 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
245 * Configurable value of TX RS bit threshold.
247 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
250 * Configurable value of TX queue flags.
252 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
255 * Receive Side Scaling (RSS) configuration.
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
260 * Port topology configuration
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
265 * Avoids to flush all the RX streams before starts forwarding.
267 uint8_t no_flush_rx = 0; /* flush by default */
270 * Avoids to check link status when starting/stopping a port.
272 uint8_t no_link_check = 0; /* check by default */
275 * Enable link status change notification
277 uint8_t lsc_interrupt = 1; /* enabled by default */
280 * Enable device removal notification.
282 uint8_t rmv_interrupt = 1; /* enabled by default */
285 * NIC bypass mode configuration options.
287 #ifdef RTE_NIC_BYPASS
289 /* The NIC bypass watchdog timeout. */
290 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
294 #ifdef RTE_LIBRTE_LATENCY_STATS
297 * Set when latency stats is enabled in the commandline
299 uint8_t latencystats_enabled;
302 * Lcore ID to serive latency statistics.
304 lcoreid_t latencystats_lcore_id = -1;
/* Default RX mode / flow-director config; NOTE(review): several
 * initializer lines and closing braces are missing from the extraction. */
309 * Ethernet device configuration.
311 struct rte_eth_rxmode rx_mode = {
312 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
314 .header_split = 0, /**< Header Split disabled. */
315 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
316 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
317 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
318 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
319 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
320 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
323 struct rte_fdir_conf fdir_conf = {
324 .mode = RTE_FDIR_MODE_NONE,
325 .pballoc = RTE_FDIR_PBALLOC_64K,
326 .status = RTE_FDIR_REPORT_STATUS,
328 .vlan_tci_mask = 0x0,
330 .src_ip = 0xFFFFFFFF,
331 .dst_ip = 0xFFFFFFFF,
334 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
335 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
337 .src_port_mask = 0xFFFF,
338 .dst_port_mask = 0xFFFF,
339 .mac_addr_byte_mask = 0xFF,
340 .tunnel_type_mask = 1,
341 .tunnel_id_mask = 0xFFFFFFFF,
346 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
348 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
349 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
351 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
352 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
354 uint16_t nb_tx_queue_stats_mappings = 0;
355 uint16_t nb_rx_queue_stats_mappings = 0;
357 unsigned max_socket = 0;
359 #ifdef RTE_LIBRTE_BITRATE
360 /* Bitrate statistics */
361 struct rte_stats_bitrates *bitrate_data;
362 lcoreid_t bitrate_lcore_id;
363 uint8_t bitrate_enabled;
366 /* Forward function declarations */
367 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
368 static void check_all_ports_link_status(uint32_t port_mask);
369 static void eth_event_callback(uint8_t port_id,
370 enum rte_eth_event_type type,
374 * Check if all the ports are started.
375 * If yes, return positive value. If not, return zero.
377 static int all_ports_started(void);
380 * Setup default configuration.
/*
 * Build the default forwarding-lcore table from the EAL lcore set:
 * record every enabled lcore except the master into fwd_lcores_cpuids[],
 * and track the highest CPU socket id seen (+1) in the global max_socket,
 * aborting if it exceeds RTE_MAX_NUMA_NODES.
 *
 * NOTE(review): mangled extraction — the return-type line, the `continue;`
 * statements after the two filter `if`s, and the closing braces are missing;
 * code tokens are left byte-identical.
 */
383 set_default_fwd_lcores_config(void)
387 unsigned int sock_num;
390 for (i = 0; i < RTE_MAX_LCORE; i++) {
391 sock_num = rte_lcore_to_socket_id(i) + 1;
392 if (sock_num > max_socket) {
393 if (sock_num > RTE_MAX_NUMA_NODES)
394 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
395 max_socket = sock_num;
397 if (!rte_lcore_is_enabled(i))
399 if (i == rte_get_master_lcore())
401 fwd_lcores_cpuids[nb_lc++] = i;
403 nb_lcores = (lcoreid_t) nb_lc;
404 nb_cfg_lcores = nb_lcores;
409 set_def_peer_eth_addrs(void)
413 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
414 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
415 peer_eth_addrs[i].addr_bytes[5] = i;
420 set_default_fwd_ports_config(void)
424 for (pt_id = 0; pt_id < nb_ports; pt_id++)
425 fwd_ports_ids[pt_id] = pt_id;
427 nb_cfg_ports = nb_ports;
428 nb_fwd_ports = nb_ports;
/*
 * Reset the whole forwarding setup to its defaults: lcore list, peer
 * Ethernet addresses, and forwarding-port list (in that order).
 *
 * NOTE(review): reconstructed from a mangled extraction — stray line-number
 * tokens removed and missing braces restored; all visible code tokens are
 * preserved.  The `void` return type is assumed — confirm against upstream
 * testpmd.c.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Create the mbuf pool for one CPU socket: build a per-socket pool name,
 * optionally try the Xen gntalloc allocator first (RTE_LIBRTE_PMD_XENVIRT),
 * fall back to either anonymous-memory population (create_empty +
 * populate_anon + pktmbuf init) or the plain rte_pktmbuf_pool_create()
 * wrapper, and abort via rte_exit() if no pool could be created.
 *
 * NOTE(review): mangled extraction — the function's return-type line,
 * several #else/#endif lines, a goto/error label, and closing braces are
 * missing; code tokens are left byte-identical.
 */
440 * Configuration initialisation done once at init time.
443 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
444 unsigned int socket_id)
446 char pool_name[RTE_MEMPOOL_NAMESIZE];
447 struct rte_mempool *rte_mp = NULL;
450 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
451 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
454 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
455 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
457 #ifdef RTE_LIBRTE_PMD_XENVIRT
458 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
459 (unsigned) mb_mempool_cache,
460 sizeof(struct rte_pktmbuf_pool_private),
461 rte_pktmbuf_pool_init, NULL,
462 rte_pktmbuf_init, NULL,
466 /* if the former XEN allocation failed fall back to normal allocation */
467 if (rte_mp == NULL) {
469 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
470 mb_size, (unsigned) mb_mempool_cache,
471 sizeof(struct rte_pktmbuf_pool_private),
476 if (rte_mempool_populate_anon(rte_mp) == 0) {
477 rte_mempool_free(rte_mp);
481 rte_pktmbuf_pool_init(rte_mp, NULL);
482 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
484 /* wrapper to rte_mempool_create() */
485 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
486 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
491 if (rte_mp == NULL) {
492 rte_exit(EXIT_FAILURE,
493 "Creation of mbuf pool for socket %u failed: %s\n",
494 socket_id, rte_strerror(rte_errno));
495 } else if (verbose_level > 0) {
496 rte_mempool_dump(stdout, rte_mp);
/*
 * Validate a socket id against the detected socket count (max_socket).
 * Per the original comment: returns 0 for a valid socket id, -1 otherwise;
 * on the first invalid id with NUMA enabled it prints a one-time warning
 * (warning_once latch) suggesting manual NUMA configuration.
 *
 * NOTE(review): mangled extraction — the warning text is truncated and the
 * return statements/closing braces are missing; code tokens are left
 * byte-identical.
 */
501 * Check given socket id is valid or not with NUMA mode,
502 * if valid, return 0, else return -1
505 check_socket_id(const unsigned int socket_id)
507 static int warning_once = 0;
509 if (socket_id >= max_socket) {
510 if (!warning_once && numa_support)
511 printf("Warning: NUMA should be configured manually by"
512 " using --port-numa-config and"
513 " --ring-numa-config parameters along with"
/*
 * Interior of init_config() — the function header is outside this view.
 * One-time initialisation: allocate the per-lcore fwd_lcore contexts,
 * query device info for every probed port and tally ports per NUMA socket,
 * flag every port for (re)configuration, size the mbuf pools (either from
 * --total-num-mbufs or from worst-case descriptor/cache/burst counts
 * scaled by RTE_MAX_ETHPORTS), create one pool per socket (NUMA) or a
 * single pool (UMA), bind each forwarding lcore to the pool of its socket
 * (falling back to socket 0's pool), and finally build the forwarding
 * streams via init_fwd_streams().
 *
 * NOTE(review): mangled extraction — variable declarations (lc_id, pid, i),
 * several if/else lines, and closing braces are missing; code tokens are
 * left byte-identical.
 */
525 struct rte_port *port;
526 struct rte_mempool *mbp;
527 unsigned int nb_mbuf_per_pool;
529 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
531 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
532 /* Configuration of logical cores. */
533 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
534 sizeof(struct fwd_lcore *) * nb_lcores,
535 RTE_CACHE_LINE_SIZE);
536 if (fwd_lcores == NULL) {
537 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
538 "failed\n", nb_lcores);
540 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
541 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
542 sizeof(struct fwd_lcore),
543 RTE_CACHE_LINE_SIZE);
544 if (fwd_lcores[lc_id] == NULL) {
545 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
548 fwd_lcores[lc_id]->cpuid_idx = lc_id;
551 RTE_ETH_FOREACH_DEV(pid) {
553 rte_eth_dev_info_get(pid, &port->dev_info);
556 if (port_numa[pid] != NUMA_NO_CONFIG)
557 port_per_socket[port_numa[pid]]++;
559 uint32_t socket_id = rte_eth_dev_socket_id(pid);
561 /* if socket_id is invalid, set to 0 */
562 if (check_socket_id(socket_id) < 0)
564 port_per_socket[socket_id]++;
568 /* set flag to initialize port/queue */
569 port->need_reconfig = 1;
570 port->need_reconfig_queues = 1;
574 * Create pools of mbuf.
575 * If NUMA support is disabled, create a single pool of mbuf in
576 * socket 0 memory by default.
577 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
579 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
580 * nb_txd can be configured at run time.
582 if (param_total_num_mbufs)
583 nb_mbuf_per_pool = param_total_num_mbufs;
585 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
586 (nb_lcores * mb_mempool_cache) +
587 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
588 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
594 for (i = 0; i < max_socket; i++)
595 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
597 if (socket_num == UMA_NO_CONFIG)
598 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
600 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
607 * Records which Mbuf pool to use by each logical core, if needed.
609 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
610 mbp = mbuf_pool_find(
611 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
614 mbp = mbuf_pool_find(0);
615 fwd_lcores[lc_id]->mbp = mbp;
618 /* Configuration of packet forwarding streams. */
619 if (init_fwd_streams() < 0)
620 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * Re-initialise a single port's context after a hot-plug/attach event:
 * refresh its dev_info, flag it for port and queue reconfiguration, and
 * record the socket it should allocate from.
 *
 * NOTE(review): mangled extraction — the return-type line, braces, and the
 * tail of the function (lines past original 638) are missing; code tokens
 * are left byte-identical.
 */
627 reconfig(portid_t new_port_id, unsigned socket_id)
629 struct rte_port *port;
631 /* Reconfiguration of Ethernet ports. */
632 port = &ports[new_port_id];
633 rte_eth_dev_info_get(new_port_id, &port->dev_info);
635 /* set flag to initialize port/queue */
636 port->need_reconfig = 1;
637 port->need_reconfig_queues = 1;
638 port->socket_id = socket_id;
/*
 * (Re)build the forwarding-stream array: validate nb_rxq/nb_txq against
 * each port's capabilities, assign each port's socket id (from
 * --port-numa-config, the device's own socket, or --socket-num in UMA
 * mode), compute the new stream count as nb_ports * max(nb_rxq, nb_txq),
 * and if it changed, free the old fwd_streams array and zero-allocate a
 * new one plus one fwd_stream per slot.  The original returns 0 on
 * success and a negative value on validation failure.
 *
 * NOTE(review): mangled extraction — return statements, else branches,
 * and closing braces are missing; code tokens are left byte-identical.
 */
645 init_fwd_streams(void)
648 struct rte_port *port;
649 streamid_t sm_id, nb_fwd_streams_new;
652 /* set socket id according to numa or not */
653 RTE_ETH_FOREACH_DEV(pid) {
655 if (nb_rxq > port->dev_info.max_rx_queues) {
656 printf("Fail: nb_rxq(%d) is greater than "
657 "max_rx_queues(%d)\n", nb_rxq,
658 port->dev_info.max_rx_queues);
661 if (nb_txq > port->dev_info.max_tx_queues) {
662 printf("Fail: nb_txq(%d) is greater than "
663 "max_tx_queues(%d)\n", nb_txq,
664 port->dev_info.max_tx_queues);
668 if (port_numa[pid] != NUMA_NO_CONFIG)
669 port->socket_id = port_numa[pid];
671 port->socket_id = rte_eth_dev_socket_id(pid);
673 /* if socket_id is invalid, set to 0 */
674 if (check_socket_id(port->socket_id) < 0)
679 if (socket_num == UMA_NO_CONFIG)
682 port->socket_id = socket_num;
686 q = RTE_MAX(nb_rxq, nb_txq);
688 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
691 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
692 if (nb_fwd_streams_new == nb_fwd_streams)
695 if (fwd_streams != NULL) {
696 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
697 if (fwd_streams[sm_id] == NULL)
699 rte_free(fwd_streams[sm_id]);
700 fwd_streams[sm_id] = NULL;
702 rte_free(fwd_streams);
707 nb_fwd_streams = nb_fwd_streams_new;
708 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
709 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
710 if (fwd_streams == NULL)
711 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
712 "failed\n", nb_fwd_streams);
714 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
715 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
716 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
717 if (fwd_streams[sm_id] == NULL)
718 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
/*
 * (Compiled only with RTE_TEST_PMD_RECORD_BURST_STATS.)
 * Print a one-line histogram summary of burst sizes for one direction
 * ("RX" or "TX"): total number of bursts, the dominant burst size with
 * its percentage, and up to one runner-up, lumping the rest as "others".
 * Scans pbs->pkt_burst_spread[] to find the total and the top burst-size
 * buckets; integer-percentage arithmetic throughout.
 *
 * NOTE(review): mangled extraction — the static/void header, nb_pkt
 * declaration, several else-if branches, return statements and braces are
 * missing; code tokens are left byte-identical.
 */
725 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
727 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
729 unsigned int total_burst;
730 unsigned int nb_burst;
731 unsigned int burst_stats[3];
732 uint16_t pktnb_stats[3];
734 int burst_percent[3];
737 * First compute the total number of packet bursts and the
738 * two highest numbers of bursts of the same number of packets.
741 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
742 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
743 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
744 nb_burst = pbs->pkt_burst_spread[nb_pkt];
747 total_burst += nb_burst;
748 if (nb_burst > burst_stats[0]) {
749 burst_stats[1] = burst_stats[0];
750 pktnb_stats[1] = pktnb_stats[0];
751 burst_stats[0] = nb_burst;
752 pktnb_stats[0] = nb_pkt;
755 if (total_burst == 0)
757 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
758 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
759 burst_percent[0], (int) pktnb_stats[0]);
760 if (burst_stats[0] == total_burst) {
764 if (burst_stats[0] + burst_stats[1] == total_burst) {
765 printf(" + %d%% of %d pkts]\n",
766 100 - burst_percent[0], pktnb_stats[1]);
769 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
770 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
771 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
772 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
775 printf(" + %d%% of %d pkts + %d%% of others]\n",
776 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
778 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print per-port forwarding statistics.  Two layouts: one when no queue
 * stats mapping is enabled (wide columns with RX/TX packets, drops,
 * totals, checksum errors when the csum engine is active, and
 * errors/nombufs when non-zero), and a narrower one otherwise.  With
 * RTE_TEST_PMD_RECORD_BURST_STATS it also dumps burst histograms, and
 * with queue stats mappings enabled it prints per-stats-register queue
 * counters.
 *
 * NOTE(review): mangled extraction — the static/void header, braces, and
 * several printf continuation/format lines are missing; code tokens are
 * left byte-identical.
 */
781 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
783 struct rte_port *port;
786 static const char *fwd_stats_border = "----------------------";
788 port = &ports[port_id];
789 printf("\n %s Forward statistics for port %-2d %s\n",
790 fwd_stats_border, port_id, fwd_stats_border);
792 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
793 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
795 stats->ipackets, stats->imissed,
796 (uint64_t) (stats->ipackets + stats->imissed));
798 if (cur_fwd_eng == &csum_fwd_engine)
799 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
800 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
801 if ((stats->ierrors + stats->rx_nombuf) > 0) {
802 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
803 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
806 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
808 stats->opackets, port->tx_dropped,
809 (uint64_t) (stats->opackets + port->tx_dropped));
812 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
814 stats->ipackets, stats->imissed,
815 (uint64_t) (stats->ipackets + stats->imissed));
817 if (cur_fwd_eng == &csum_fwd_engine)
818 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
819 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
820 if ((stats->ierrors + stats->rx_nombuf) > 0) {
821 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
822 printf(" RX-nombufs: %14"PRIu64"\n",
826 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
828 stats->opackets, port->tx_dropped,
829 (uint64_t) (stats->opackets + port->tx_dropped));
832 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
834 pkt_burst_stats_display("RX",
835 &port->rx_stream->rx_burst_stats);
837 pkt_burst_stats_display("TX",
838 &port->tx_stream->tx_burst_stats);
841 if (port->rx_queue_stats_mapping_enabled) {
843 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
844 printf(" Stats reg %2d RX-packets:%14"PRIu64
845 " RX-errors:%14"PRIu64
846 " RX-bytes:%14"PRIu64"\n",
847 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
851 if (port->tx_queue_stats_mapping_enabled) {
852 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
853 printf(" Stats reg %2d TX-packets:%14"PRIu64
854 " TX-bytes:%14"PRIu64"\n",
855 i, stats->q_opackets[i], stats->q_obytes[i]);
859 printf(" %s--------------------------------%s\n",
860 fwd_stats_border, fwd_stats_border);
/*
 * Print the statistics of one forwarding stream (RX port/queue ->
 * TX port/queue): packet counts, drops, checksum error counts when the
 * csum engine is active, and (with RTE_TEST_PMD_RECORD_BURST_STATS)
 * the RX/TX burst histograms.  Streams with all-zero counters are
 * skipped (the early `if` below; its `return;` line is missing from
 * this extraction).
 *
 * NOTE(review): mangled extraction — the static/void header, braces and
 * a few lines are missing; code tokens are left byte-identical.
 */
864 fwd_stream_stats_display(streamid_t stream_id)
866 struct fwd_stream *fs;
867 static const char *fwd_top_stats_border = "-------";
869 fs = fwd_streams[stream_id];
870 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
871 (fs->fwd_dropped == 0))
873 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
874 "TX Port=%2d/Queue=%2d %s\n",
875 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
876 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
877 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
878 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
880 /* if checksum mode */
881 if (cur_fwd_eng == &csum_fwd_engine) {
882 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
883 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
886 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
887 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
888 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain every RX queue of every forwarding port before forwarding starts:
 * two passes over all (port, rxq) pairs, repeatedly calling
 * rte_eth_rx_burst() and freeing the received mbufs until a burst returns
 * zero packets or a 1-second TSC timeout expires (guards against devices
 * that always return packets), with a 10 ms pause between the two passes.
 *
 * NOTE(review): mangled extraction — the static/void header, loop-variable
 * declarations, the `do {` opener, and closing braces are missing; code
 * tokens are left byte-identical.
 */
893 flush_fwd_rx_queues(void)
895 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
902 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
903 uint64_t timer_period;
905 /* convert to number of cycles */
906 timer_period = rte_get_timer_hz(); /* 1 second timeout */
908 for (j = 0; j < 2; j++) {
909 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
910 for (rxq = 0; rxq < nb_rxq; rxq++) {
911 port_id = fwd_ports_ids[rxp];
913 * testpmd can stuck in the below do while loop
914 * if rte_eth_rx_burst() always returns nonzero
915 * packets. So timer is added to exit this loop
916 * after 1sec timer expiry.
918 prev_tsc = rte_rdtsc();
920 nb_rx = rte_eth_rx_burst(port_id, rxq,
921 pkts_burst, MAX_PKT_BURST);
922 for (i = 0; i < nb_rx; i++)
923 rte_pktmbuf_free(pkts_burst[i]);
925 cur_tsc = rte_rdtsc();
926 diff_tsc = cur_tsc - prev_tsc;
927 timer_tsc += diff_tsc;
928 } while ((nb_rx > 0) &&
929 (timer_tsc < timer_period));
933 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main forwarding loop of one lcore: repeatedly apply pkt_fwd to each of
 * this lcore's streams until fc->stopped is set.  Inside the loop, the
 * lcore designated for bitrate stats recomputes them once per second
 * (TSC-based, RTE_LIBRTE_BITRATE), and the lcore designated for latency
 * stats calls rte_latencystats_update() (RTE_LIBRTE_LATENCY_STATS).
 *
 * NOTE(review): mangled extraction — the static/void header, the `do {`
 * opener, the per-port bitrate-calc loop header/argument lines, #endif
 * lines and braces are missing; code tokens are left byte-identical.
 */
938 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
940 struct fwd_stream **fsm;
943 #ifdef RTE_LIBRTE_BITRATE
944 uint64_t tics_per_1sec;
946 uint64_t tics_current;
947 uint8_t idx_port, cnt_ports;
949 cnt_ports = rte_eth_dev_count();
950 tics_datum = rte_rdtsc();
951 tics_per_1sec = rte_get_timer_hz();
953 fsm = &fwd_streams[fc->stream_idx];
954 nb_fs = fc->stream_nb;
956 for (sm_id = 0; sm_id < nb_fs; sm_id++)
957 (*pkt_fwd)(fsm[sm_id]);
958 #ifdef RTE_LIBRTE_BITRATE
959 if (bitrate_enabled != 0 &&
960 bitrate_lcore_id == rte_lcore_id()) {
961 tics_current = rte_rdtsc();
962 if (tics_current - tics_datum >= tics_per_1sec) {
963 /* Periodic bitrate calculation */
965 idx_port < cnt_ports;
967 rte_stats_bitrate_calc(bitrate_data,
969 tics_datum = tics_current;
973 #ifdef RTE_LIBRTE_LATENCY_STATS
974 if (latencystats_enabled != 0 &&
975 latencystats_lcore_id == rte_lcore_id())
976 rte_latencystats_update();
979 } while (! fc->stopped);
983 start_pkt_forward_on_core(void *fwd_arg)
985 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
986 cur_fwd_config.fwd_eng->packet_fwd);
991 * Run the TXONLY packet forwarding engine to send a single burst of packets.
992 * Used to start communication flows in network loopback test configurations.
995 run_one_txonly_burst_on_core(void *fwd_arg)
997 struct fwd_lcore *fwd_lc;
998 struct fwd_lcore tmp_lcore;
1000 fwd_lc = (struct fwd_lcore *) fwd_arg;
1001 tmp_lcore = *fwd_lc;
1002 tmp_lcore.stopped = 1;
1003 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
/*
 * Launch the given lcore function on every forwarding lcore: first invoke
 * the engine's optional per-port port_fwd_begin hook, then
 * rte_eal_remote_launch() the function on each configured lcore (skipping
 * the lcore running the interactive command line), printing a message on
 * launch failure.
 *
 * NOTE(review): mangled extraction — the static/void header, declarations
 * of i/lc_id/diag, the diag check line, and closing braces are missing;
 * code tokens are left byte-identical.
 */
1008 * Launch packet forwarding:
1009 * - Setup per-port forwarding context.
1010 * - launch logical cores with their forwarding configuration.
1013 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1015 port_fwd_begin_t port_fwd_begin;
1020 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1021 if (port_fwd_begin != NULL) {
1022 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1023 (*port_fwd_begin)(fwd_ports_ids[i]);
1025 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1026 lc_id = fwd_lcores_cpuids[i];
1027 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1028 fwd_lcores[i]->stopped = 0;
1029 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1030 fwd_lcores[i], lc_id);
1032 printf("launch lcore %u failed - diag=%d\n",
/*
 * Start packet forwarding with the currently selected engine.  Validates
 * that the engine's queue requirements are met (rxonly needs RX queues,
 * txonly needs TX queues, everything else needs both), that all ports are
 * started and forwarding is not already running, rebuilds the forwarding
 * streams, enforces DCB constraints when dcb_test is set, optionally
 * flushes stale RX packets, snapshots per-port stats and zeroes per-stream
 * counters as the baseline, then — if with_tx_first — runs the txonly
 * engine for that many single bursts before launching the real forwarding
 * loop on all lcores.
 *
 * NOTE(review): mangled extraction — the void header, several
 * declarations, `return;` statements, the fwd_config_setup() call and
 * closing braces are missing; code tokens are left byte-identical.
 */
1039 * Launch packet forwarding configuration.
1042 start_packet_forwarding(int with_tx_first)
1044 port_fwd_begin_t port_fwd_begin;
1045 port_fwd_end_t port_fwd_end;
1046 struct rte_port *port;
1051 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1052 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1054 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1055 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1057 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1058 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1059 (!nb_rxq || !nb_txq))
1060 rte_exit(EXIT_FAILURE,
1061 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1062 cur_fwd_eng->fwd_mode_name);
1064 if (all_ports_started() == 0) {
1065 printf("Not all ports were started\n");
1068 if (test_done == 0) {
1069 printf("Packet forwarding already started\n");
1073 if (init_fwd_streams() < 0) {
1074 printf("Fail from init_fwd_streams()\n");
1079 for (i = 0; i < nb_fwd_ports; i++) {
1080 pt_id = fwd_ports_ids[i];
1081 port = &ports[pt_id];
1082 if (!port->dcb_flag) {
1083 printf("In DCB mode, all forwarding ports must "
1084 "be configured in this mode.\n");
1088 if (nb_fwd_lcores == 1) {
1089 printf("In DCB mode,the nb forwarding cores "
1090 "should be larger than 1.\n");
1097 flush_fwd_rx_queues();
1100 pkt_fwd_config_display(&cur_fwd_config);
1101 rxtx_config_display();
1103 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1104 pt_id = fwd_ports_ids[i];
1105 port = &ports[pt_id];
1106 rte_eth_stats_get(pt_id, &port->stats);
1107 port->tx_dropped = 0;
1109 map_port_queue_stats_mapping_registers(pt_id, port);
1111 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1112 fwd_streams[sm_id]->rx_packets = 0;
1113 fwd_streams[sm_id]->tx_packets = 0;
1114 fwd_streams[sm_id]->fwd_dropped = 0;
1115 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1116 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1118 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1119 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1120 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1121 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1122 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1124 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1125 fwd_streams[sm_id]->core_cycles = 0;
1128 if (with_tx_first) {
1129 port_fwd_begin = tx_only_engine.port_fwd_begin;
1130 if (port_fwd_begin != NULL) {
1131 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1132 (*port_fwd_begin)(fwd_ports_ids[i]);
1134 while (with_tx_first--) {
1135 launch_packet_forwarding(
1136 run_one_txonly_burst_on_core);
1137 rte_eal_mp_wait_lcore();
1139 port_fwd_end = tx_only_engine.port_fwd_end;
1140 if (port_fwd_end != NULL) {
1141 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1142 (*port_fwd_end)(fwd_ports_ids[i]);
1145 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop packet forwarding and print the final statistics.  Sets every
 * forwarding lcore's `stopped` flag, waits for all lcores, invokes the
 * engine's per-port port_fwd_end hook, folds each stream's counters
 * (drops, bad IP/L4 checksums, optionally core cycles) into its ports,
 * then for each port computes the delta of rte_eth_stats against the
 * baseline snapshot taken at start, accumulates grand totals, displays
 * per-port and accumulated statistics, and (with
 * RTE_TEST_PMD_RECORD_CORE_CYCLES) a cycles-per-packet figure.
 *
 * NOTE(review): mangled extraction — the void header, several
 * declarations, `return;` after the "not started" check, stream-pointer
 * assignments, zeroing of totals, and closing braces are missing; code
 * tokens are left byte-identical.
 */
1149 stop_packet_forwarding(void)
1151 struct rte_eth_stats stats;
1152 struct rte_port *port;
1153 port_fwd_end_t port_fwd_end;
1158 uint64_t total_recv;
1159 uint64_t total_xmit;
1160 uint64_t total_rx_dropped;
1161 uint64_t total_tx_dropped;
1162 uint64_t total_rx_nombuf;
1163 uint64_t tx_dropped;
1164 uint64_t rx_bad_ip_csum;
1165 uint64_t rx_bad_l4_csum;
1166 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1167 uint64_t fwd_cycles;
1169 static const char *acc_stats_border = "+++++++++++++++";
1172 printf("Packet forwarding not started\n");
1175 printf("Telling cores to stop...");
1176 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1177 fwd_lcores[lc_id]->stopped = 1;
1178 printf("\nWaiting for lcores to finish...\n");
1179 rte_eal_mp_wait_lcore();
1180 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1181 if (port_fwd_end != NULL) {
1182 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1183 pt_id = fwd_ports_ids[i];
1184 (*port_fwd_end)(pt_id);
1187 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1190 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1191 if (cur_fwd_config.nb_fwd_streams >
1192 cur_fwd_config.nb_fwd_ports) {
1193 fwd_stream_stats_display(sm_id);
1194 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1195 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1197 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1199 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1202 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1203 tx_dropped = (uint64_t) (tx_dropped +
1204 fwd_streams[sm_id]->fwd_dropped);
1205 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1208 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1209 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1210 fwd_streams[sm_id]->rx_bad_ip_csum);
1211 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1215 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1216 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1217 fwd_streams[sm_id]->rx_bad_l4_csum);
1218 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1221 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1222 fwd_cycles = (uint64_t) (fwd_cycles +
1223 fwd_streams[sm_id]->core_cycles);
1228 total_rx_dropped = 0;
1229 total_tx_dropped = 0;
1230 total_rx_nombuf = 0;
1231 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1232 pt_id = fwd_ports_ids[i];
1234 port = &ports[pt_id];
1235 rte_eth_stats_get(pt_id, &stats);
1236 stats.ipackets -= port->stats.ipackets;
1237 port->stats.ipackets = 0;
1238 stats.opackets -= port->stats.opackets;
1239 port->stats.opackets = 0;
1240 stats.ibytes -= port->stats.ibytes;
1241 port->stats.ibytes = 0;
1242 stats.obytes -= port->stats.obytes;
1243 port->stats.obytes = 0;
1244 stats.imissed -= port->stats.imissed;
1245 port->stats.imissed = 0;
1246 stats.oerrors -= port->stats.oerrors;
1247 port->stats.oerrors = 0;
1248 stats.rx_nombuf -= port->stats.rx_nombuf;
1249 port->stats.rx_nombuf = 0;
1251 total_recv += stats.ipackets;
1252 total_xmit += stats.opackets;
1253 total_rx_dropped += stats.imissed;
1254 total_tx_dropped += port->tx_dropped;
1255 total_rx_nombuf += stats.rx_nombuf;
1257 fwd_port_stats_display(pt_id, &stats);
1259 printf("\n %s Accumulated forward statistics for all ports"
1261 acc_stats_border, acc_stats_border);
1262 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1264 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1266 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1267 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1268 if (total_rx_nombuf > 0)
1269 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1270 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1272 acc_stats_border, acc_stats_border);
1273 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1275 printf("\n CPU cycles/packet=%u (total cycles="
1276 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1277 (unsigned int)(fwd_cycles / total_recv),
1278 fwd_cycles, total_recv);
1280 printf("\nDone.\n");
1285 dev_set_link_up(portid_t pid)
1287 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1288 printf("\nSet link up fail.\n");
1292 dev_set_link_down(portid_t pid)
1294 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1295 printf("\nSet link down fail.\n");
/*
 * all_ports_started() - check whether every port is in RTE_PORT_STARTED
 * state; ports flagged as bonding slaves are exempt from the check.
 * NOTE(review): this listing has dropped lines (return type, braces,
 * the `port = &ports[pi];` assignment and the return statements are
 * missing) -- verify edits against the full source file.
 */
1299 all_ports_started(void)
1302 struct rte_port *port;
1304 RTE_ETH_FOREACH_DEV(pi) {
1306 /* Check if there is a port which is not started */
1307 if ((port->port_status != RTE_PORT_STARTED) &&
1308 (port->slave_flag == 0))
1312 /* No port is not started */
/*
 * all_ports_stopped() - check whether every port is in RTE_PORT_STOPPED
 * state; bonding-slave ports are exempt, mirroring all_ports_started().
 * NOTE(review): dropped lines (return type, braces, returns) in this
 * listing -- verify against the full source.
 */
1317 all_ports_stopped(void)
1320 struct rte_port *port;
1322 RTE_ETH_FOREACH_DEV(pi) {
1324 if ((port->port_status != RTE_PORT_STOPPED) &&
1325 (port->slave_flag == 0))
/*
 * port_is_started() - true only when @port_id is valid and its status
 * is RTE_PORT_STARTED. The actual return statements are dropped lines
 * in this listing -- presumably 0 on invalid/not-started, non-zero
 * otherwise; confirm against the full source before relying on it.
 */
1333 port_is_started(portid_t port_id)
1335 if (port_id_is_invalid(port_id, ENABLED_WARN))
1338 if (ports[port_id].port_status != RTE_PORT_STARTED)
/*
 * port_is_closed() - true only when @port_id is valid and its status
 * is RTE_PORT_CLOSED. Used by detach_port() as a precondition. Return
 * statements are dropped lines in this listing -- confirm against the
 * full source.
 */
1345 port_is_closed(portid_t port_id)
1347 if (port_id_is_invalid(port_id, ENABLED_WARN))
1350 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * start_port() - (re)configure and start one port, or all ports when
 * @pid == RTE_PORT_ALL. Per port: transition STOPPED -> HANDLING via
 * atomic cmpset, optionally reconfigure the device and its RX/TX
 * queues (honouring per-port NUMA ring placement when numa_support is
 * on), register ethdev event callbacks, start the device, then mark
 * the port STARTED and print its MAC address. Finally triggers a link
 * status check unless no_link_check is set.
 * NOTE(review): this listing has many dropped lines (braces, `continue`
 * / `return` statements, `port = &ports[pi];`, error checks) -- do not
 * edit from this fragment alone.
 */
1357 start_port(portid_t pid)
/* need_check_link_status: -1 = no port matched, 0 = some port was not
 * stopped, 1 = at least one port started. */
1359 int diag, need_check_link_status = -1;
1362 struct rte_port *port;
1363 struct ether_addr mac_addr;
1364 enum rte_eth_event_type event_type;
1366 if (port_id_is_invalid(pid, ENABLED_WARN))
1371 RTE_ETH_FOREACH_DEV(pi) {
1372 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1375 need_check_link_status = 0;
/* Claim the port: only a STOPPED port may be started. */
1377 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1378 RTE_PORT_HANDLING) == 0) {
1379 printf("Port %d is now not stopped\n", pi);
1383 if (port->need_reconfig > 0) {
1384 port->need_reconfig = 0;
1386 printf("Configuring Port %d (socket %u)\n", pi,
1388 /* configure port */
1389 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* On configure failure: roll status back to STOPPED and retry the
 * reconfiguration on the next start_port() call. */
1392 if (rte_atomic16_cmpset(&(port->port_status),
1393 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1394 printf("Port %d can not be set back "
1395 "to stopped\n", pi);
1396 printf("Fail to configure port %d\n", pi);
1397 /* try to reconfigure port next time */
1398 port->need_reconfig = 1;
1402 if (port->need_reconfig_queues > 0) {
1403 port->need_reconfig_queues = 0;
1404 /* setup tx queues */
1405 for (qi = 0; qi < nb_txq; qi++) {
/* Prefer the user-requested NUMA socket for the TX ring when set. */
1406 if ((numa_support) &&
1407 (txring_numa[pi] != NUMA_NO_CONFIG))
1408 diag = rte_eth_tx_queue_setup(pi, qi,
1409 nb_txd,txring_numa[pi],
1412 diag = rte_eth_tx_queue_setup(pi, qi,
1413 nb_txd,port->socket_id,
1419 /* Fail to setup tx queue, return */
1420 if (rte_atomic16_cmpset(&(port->port_status),
1422 RTE_PORT_STOPPED) == 0)
1423 printf("Port %d can not be set back "
1424 "to stopped\n", pi);
1425 printf("Fail to configure port %d tx queues\n", pi);
1426 /* try to reconfigure queues next time */
1427 port->need_reconfig_queues = 1;
1430 /* setup rx queues */
1431 for (qi = 0; qi < nb_rxq; qi++) {
/* RX rings additionally need an mbuf pool on the chosen socket. */
1432 if ((numa_support) &&
1433 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1434 struct rte_mempool * mp =
1435 mbuf_pool_find(rxring_numa[pi]);
1437 printf("Failed to setup RX queue:"
1438 "No mempool allocation"
1439 " on the socket %d\n",
1444 diag = rte_eth_rx_queue_setup(pi, qi,
1445 nb_rxd,rxring_numa[pi],
1446 &(port->rx_conf),mp);
1448 struct rte_mempool *mp =
1449 mbuf_pool_find(port->socket_id);
1451 printf("Failed to setup RX queue:"
1452 "No mempool allocation"
1453 " on the socket %d\n",
1457 diag = rte_eth_rx_queue_setup(pi, qi,
1458 nb_rxd,port->socket_id,
1459 &(port->rx_conf), mp);
1464 /* Fail to setup rx queue, return */
1465 if (rte_atomic16_cmpset(&(port->port_status),
1467 RTE_PORT_STOPPED) == 0)
1468 printf("Port %d can not be set back "
1469 "to stopped\n", pi);
1470 printf("Fail to configure port %d rx queues\n", pi);
1471 /* try to reconfigure queues next time */
1472 port->need_reconfig_queues = 1;
/* Register eth_event_callback for every known ethdev event type. */
1477 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1478 event_type < RTE_ETH_EVENT_MAX;
1480 diag = rte_eth_dev_callback_register(pi,
/* NOTE(review): "even callback" is a typo for "event callback" in this
 * user-visible message. */
1485 printf("Failed to setup even callback for event %d\n",
1492 if (rte_eth_dev_start(pi) < 0) {
1493 printf("Fail to start port %d\n", pi);
/* NOTE(review): stale comment -- this rollback is for a failed
 * rte_eth_dev_start(), not an rx queue setup failure. */
1495 /* Fail to setup rx queue, return */
1496 if (rte_atomic16_cmpset(&(port->port_status),
1497 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1498 printf("Port %d can not be set back to "
1503 if (rte_atomic16_cmpset(&(port->port_status),
1504 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1505 printf("Port %d can not be set into started\n", pi);
1507 rte_eth_macaddr_get(pi, &mac_addr);
1508 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1509 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1510 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1511 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1513 /* at least one port started, need checking link status */
1514 need_check_link_status = 1;
1517 if (need_check_link_status == 1 && !no_link_check)
1518 check_all_ports_link_status(RTE_PORT_ALL);
1519 else if (need_check_link_status == 0)
1520 printf("Please stop the ports first\n");
/*
 * stop_port() - stop one port, or all ports when @pid == RTE_PORT_ALL.
 * Refuses ports still referenced by the forwarding configuration (while
 * forwarding is active) and bonding slaves. Uses the same atomic
 * STARTED -> HANDLING -> STOPPED status protocol as start_port().
 * NOTE(review): dropped lines (braces, `continue` statements, return
 * type) in this listing -- verify against the full source.
 */
1527 stop_port(portid_t pid)
1530 struct rte_port *port;
1531 int need_check_link_status = 0;
1538 if (port_id_is_invalid(pid, ENABLED_WARN))
1541 printf("Stopping ports...\n");
1543 RTE_ETH_FOREACH_DEV(pi) {
1544 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1547 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1548 printf("Please remove port %d from forwarding configuration.\n", pi);
1552 if (port_is_bonding_slave(pi)) {
1553 printf("Please remove port %d from bonded device.\n", pi);
/* Only a STARTED port can be stopped; otherwise skip it silently. */
1558 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1559 RTE_PORT_HANDLING) == 0)
1562 rte_eth_dev_stop(pi);
1564 if (rte_atomic16_cmpset(&(port->port_status),
1565 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1566 printf("Port %d can not be set into stopped\n", pi);
1567 need_check_link_status = 1;
1569 if (need_check_link_status && !no_link_check)
1570 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * close_port() - close one port, or all ports when @pid == RTE_PORT_ALL.
 * Skips ports in use by forwarding or bonding, flushes any installed
 * rte_flow rules, then closes the device and marks the port
 * RTE_PORT_CLOSED via the atomic status protocol.
 * NOTE(review): dropped lines (braces, `continue` statements, return
 * type) in this listing -- verify against the full source.
 */
1576 close_port(portid_t pid)
1579 struct rte_port *port;
1581 if (port_id_is_invalid(pid, ENABLED_WARN))
1584 printf("Closing ports...\n");
1586 RTE_ETH_FOREACH_DEV(pi) {
1587 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1590 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1591 printf("Please remove port %d from forwarding configuration.\n", pi);
1595 if (port_is_bonding_slave(pi)) {
1596 printf("Please remove port %d from bonded device.\n", pi);
/* CLOSED -> CLOSED cmpset succeeding means the port is already closed. */
1601 if (rte_atomic16_cmpset(&(port->port_status),
1602 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1603 printf("Port %d is already closed\n", pi);
1607 if (rte_atomic16_cmpset(&(port->port_status),
1608 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1609 printf("Port %d is now not stopped\n", pi);
/* Remove rte_flow rules before closing the underlying device. */
1613 if (port->flow_list)
1614 port_flow_flush(pi);
1615 rte_eth_dev_close(pi);
1617 if (rte_atomic16_cmpset(&(port->port_status),
1618 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1619 printf("Port %d cannot be set to closed\n", pi);
/*
 * attach_port() - hot-plug a new device identified by @identifier
 * (PCI address or virtual device name), reconfigure it on its NUMA
 * socket, enable promiscuous mode and leave it in RTE_PORT_STOPPED
 * state. Updates the global nb_ports count.
 * NOTE(review): dropped lines (braces, `return` statements, `pi`
 * declaration) in this listing -- verify against the full source.
 */
1626 attach_port(char *identifier)
1629 unsigned int socket_id;
1631 printf("Attaching a new port...\n");
1633 if (identifier == NULL) {
1634 printf("Invalid parameters are specified\n");
1638 if (rte_eth_dev_attach(identifier, &pi))
1641 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1642 /* if socket_id is invalid, set to 0 */
1643 if (check_socket_id(socket_id) < 0)
1645 reconfig(pi, socket_id);
1646 rte_eth_promiscuous_enable(pi);
1648 nb_ports = rte_eth_dev_count();
1650 ports[pi].port_status = RTE_PORT_STOPPED;
1652 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * detach_port() - hot-unplug @port_id. The port must already be closed
 * (see close_port()); any remaining rte_flow rules are flushed first.
 * Updates the global nb_ports count and reports the detached device
 * name.
 * NOTE(review): dropped lines (braces, `return` statements) in this
 * listing -- verify against the full source.
 */
1657 detach_port(uint8_t port_id)
1659 char name[RTE_ETH_NAME_MAX_LEN];
1661 printf("Detaching a port...\n");
1663 if (!port_is_closed(port_id)) {
1664 printf("Please close port first\n");
1668 if (ports[port_id].flow_list)
1669 port_flow_flush(port_id);
1671 if (rte_eth_dev_detach(port_id, name))
1674 nb_ports = rte_eth_dev_count();
1676 printf("Port '%s' is detached. Now total ports is %d\n",
/*
 * NOTE(review): fragment of the application shutdown path (the
 * function's defining line is a dropped line in this listing --
 * presumably pmd_test_exit(); confirm against the full source).
 * Stops forwarding, then shuts down every probed port before exiting.
 */
1688 stop_packet_forwarding();
1690 if (ports != NULL) {
1692 RTE_ETH_FOREACH_DEV(pt_id) {
1693 printf("\nShutting down port %d...\n", pt_id);
1699 printf("\nBye...\n");
/* Handler signature for an entry in the PMD test command menu. */
1702 typedef void (*cmd_func_t)(void);
/* One menu entry: command name -> handler function. */
1703 struct pmd_test_command {
1704 const char *cmd_name;
1705 cmd_func_t cmd_func;
/* Number of entries in pmd_test_menu (the array itself is defined
 * elsewhere / on a dropped line of this listing). */
1708 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/*
 * check_all_ports_link_status() - poll link state of every port selected
 * by @port_mask every CHECK_INTERVAL ms, for at most MAX_CHECK_TIME
 * iterations; once all links are up (or on timeout) print the final
 * per-port status and return.
 * NOTE(review): dropped lines (braces, `continue`/`break`, the
 * `all_ports_up = 1;` reset and `print_flag = 1;`) in this listing --
 * verify against the full source.
 */
1710 /* Check the link status of all ports in up to 9s, and print them finally */
1712 check_all_ports_link_status(uint32_t port_mask)
1714 #define CHECK_INTERVAL 100 /* 100ms */
1715 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1716 uint8_t portid, count, all_ports_up, print_flag = 0;
1717 struct rte_eth_link link;
1719 printf("Checking link statuses...\n");
1721 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1723 RTE_ETH_FOREACH_DEV(portid) {
1724 if ((port_mask & (1 << portid)) == 0)
1726 memset(&link, 0, sizeof(link));
1727 rte_eth_link_get_nowait(portid, &link);
1728 /* print link status if flag set */
1729 if (print_flag == 1) {
1730 if (link.link_status)
1731 printf("Port %d Link Up - speed %u "
1732 "Mbps - %s\n", (uint8_t)portid,
1733 (unsigned)link.link_speed,
1734 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* NOTE(review): BUG -- "half-duplex\n" carries a stray newline even
 * though the format string already ends with \n, producing a blank
 * line for half-duplex links. Should be "half-duplex" (fixed in later
 * upstream DPDK). */
1735 ("full-duplex") : ("half-duplex\n"));
1737 printf("Port %d Link Down\n",
1741 /* clear all_ports_up flag if any link down */
1742 if (link.link_status == ETH_LINK_DOWN) {
1747 /* after finally printing all link status, get out */
1748 if (print_flag == 1)
1751 if (all_ports_up == 0) {
1753 rte_delay_ms(CHECK_INTERVAL);
1756 /* set the print_flag if all ports up or timeout */
1757 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * rmv_event_callback() - deferred handler (scheduled via
 * rte_eal_alarm_set from eth_event_callback) for a device-removal
 * interrupt: closes the port, resolves the device name from its
 * devargs (virtual driver name or PCI address) and detaches it from
 * the EAL, marking the ethdev slot unused.
 * NOTE(review): dropped lines (braces, the `name` buffer declaration)
 * in this listing -- verify against the full source.
 */
1767 rmv_event_callback(void *arg)
1769 struct rte_eth_dev *dev;
1770 struct rte_devargs *da;
/* The port id travels through the alarm's void* argument. */
1772 uint8_t port_id = (intptr_t)arg;
1774 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1775 dev = &rte_eth_devices[port_id];
1776 da = dev->device->devargs;
1779 close_port(port_id);
1780 if (da->type == RTE_DEVTYPE_VIRTUAL)
1781 snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1782 else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1783 rte_pci_device_name(&da->pci.addr, name, sizeof(name));
1784 printf("removing device %s\n", name);
1785 rte_eal_dev_detach(name);
1786 dev->state = RTE_ETH_DEV_UNUSED;
/*
 * eth_event_callback() - ethdev event handler registered in
 * start_port(); logs the event name and, for a device-removal (RMV)
 * event, schedules rmv_event_callback() 100 ms later via an EAL alarm
 * (detaching cannot be done from the interrupt thread itself).
 * NOTE(review): dropped lines (return type, braces, the `switch`
 * statement head and `default:` arm) in this listing -- verify against
 * the full source.
 */
1789 /* This function is used by the interrupt thread */
1791 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
/* Human-readable names indexed by event type; must stay in sync with
 * enum rte_eth_event_type. */
1793 static const char * const event_desc[] = {
1794 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1795 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1796 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1797 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1798 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1799 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1800 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1801 [RTE_ETH_EVENT_MAX] = NULL,
1804 RTE_SET_USED(param);
1806 if (type >= RTE_ETH_EVENT_MAX) {
1807 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1808 port_id, __func__, type);
1811 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1817 case RTE_ETH_EVENT_INTR_RMV:
/* Defer the actual detach by 100 ms; the interrupt thread must not
 * tear the device down directly. */
1818 if (rte_eal_alarm_set(100000,
1819 rmv_event_callback, (void *)(intptr_t)port_id))
1820 fprintf(stderr, "Could not set up deferred device removal\n");
/*
 * set_tx_queue_stats_mapping_registers() - apply the user-supplied
 * TX queue -> stats counter mappings for @port_id and flag the port as
 * having TX stats mapping enabled when at least one mapping applied.
 * NOTE(review): dropped lines (return type, braces, the error return
 * on `diag != 0`, `mapping_found = 1;` and the final return) in this
 * listing -- verify against the full source.
 */
1828 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1832 uint8_t mapping_found = 0;
1834 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1835 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1836 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1837 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1838 tx_queue_stats_mappings[i].queue_id,
1839 tx_queue_stats_mappings[i].stats_counter_id);
1846 port->tx_queue_stats_mapping_enabled = 1;
/*
 * set_rx_queue_stats_mapping_registers() - RX-side counterpart of
 * set_tx_queue_stats_mapping_registers(): apply the user-supplied
 * RX queue -> stats counter mappings for @port_id.
 * NOTE(review): same dropped lines as the TX variant (error return,
 * `mapping_found = 1;`, final return) -- verify against the full
 * source.
 */
1851 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1855 uint8_t mapping_found = 0;
1857 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1858 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1859 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1860 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1861 rx_queue_stats_mappings[i].queue_id,
1862 rx_queue_stats_mappings[i].stats_counter_id);
1869 port->rx_queue_stats_mapping_enabled = 1;
/*
 * map_port_queue_stats_mapping_registers() - program both TX and RX
 * queue stats mappings for port @pi. -ENOTSUP from the driver is
 * tolerated (the feature is simply disabled for the port); any other
 * error is fatal and aborts via rte_exit().
 * NOTE(review): dropped lines (return type, braces, the `diag < 0`
 * checks and rte_exit argument lines) in this listing -- verify
 * against the full source.
 */
1874 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1878 diag = set_tx_queue_stats_mapping_registers(pi, port);
1880 if (diag == -ENOTSUP) {
1881 port->tx_queue_stats_mapping_enabled = 0;
1882 printf("TX queue stats mapping not supported port id=%d\n", pi);
1885 rte_exit(EXIT_FAILURE,
1886 "set_tx_queue_stats_mapping_registers "
1887 "failed for port id=%d diag=%d\n",
1891 diag = set_rx_queue_stats_mapping_registers(pi, port);
1893 if (diag == -ENOTSUP) {
1894 port->rx_queue_stats_mapping_enabled = 0;
1895 printf("RX queue stats mapping not supported port id=%d\n", pi);
1898 rte_exit(EXIT_FAILURE,
1899 "set_rx_queue_stats_mapping_registers "
1900 "failed for port id=%d diag=%d\n",
1906 rxtx_port_config(struct rte_port *port)
1908 port->rx_conf = port->dev_info.default_rxconf;
1909 port->tx_conf = port->dev_info.default_txconf;
1911 /* Check if any RX/TX parameters have been passed */
1912 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1913 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1915 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1916 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1918 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1919 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1921 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1922 port->rx_conf.rx_free_thresh = rx_free_thresh;
1924 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1925 port->rx_conf.rx_drop_en = rx_drop_en;
1927 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1928 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1930 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1931 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1933 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1934 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1936 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1937 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1939 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1940 port->tx_conf.tx_free_thresh = tx_free_thresh;
1942 if (txq_flags != RTE_PMD_PARAM_UNSET)
1943 port->tx_conf.txq_flags = txq_flags;
/*
 * init_port_config() - build the default rte_eth_conf for every probed
 * port: RX mode, flow-director config, RSS hash (enabled only when
 * more than one RX queue is requested -- the branch condition is a
 * dropped line here), multi-queue RX mode, queue thresholds, MAC
 * address, stats mapping and optional LSC/RMV interrupt enabling when
 * the device advertises support.
 * NOTE(review): dropped lines (braces, `port = &ports[pid];`, the
 * condition selecting between the two rss_hf assignments, `#endif`) in
 * this listing -- verify against the full source.
 */
1947 init_port_config(void)
1950 struct rte_port *port;
1952 RTE_ETH_FOREACH_DEV(pid) {
1954 port->dev_conf.rxmode = rx_mode;
1955 port->dev_conf.fdir_conf = fdir_conf;
1957 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1958 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1960 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1961 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* RSS multi-queue RX only when a non-zero hash was configured and the
 * port is not in DCB mode. */
1964 if (port->dcb_flag == 0) {
1965 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1966 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1968 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1971 rxtx_port_config(port);
1973 rte_eth_macaddr_get(pid, &port->eth_addr);
1975 map_port_queue_stats_mapping_registers(pid, port);
1976 #ifdef RTE_NIC_BYPASS
1977 rte_eth_dev_bypass_init(pid);
/* Enable link-state-change / removal interrupts only when requested
 * AND the device flags advertise support. */
1980 if (lsc_interrupt &&
1981 (rte_eth_devices[pid].data->dev_flags &
1982 RTE_ETH_DEV_INTR_LSC))
1983 port->dev_conf.intr_conf.lsc = 1;
1984 if (rmv_interrupt &&
1985 (rte_eth_devices[pid].data->dev_flags &
1986 RTE_ETH_DEV_INTR_RMV))
1987 port->dev_conf.intr_conf.rmv = 1;
1991 void set_port_slave_flag(portid_t slave_pid)
1993 struct rte_port *port;
1995 port = &ports[slave_pid];
1996 port->slave_flag = 1;
1999 void clear_port_slave_flag(portid_t slave_pid)
2001 struct rte_port *port;
2003 port = &ports[slave_pid];
2004 port->slave_flag = 0;
2007 uint8_t port_is_bonding_slave(portid_t slave_pid)
2009 struct rte_port *port;
2011 port = &ports[slave_pid];
2012 return port->slave_flag;
/* VLAN ids mapped onto VMDQ pools by get_eth_dcb_conf(); one tag per
 * possible pool (up to 32). */
2015 const uint16_t vlan_tags[] = {
2016 0, 1, 2, 3, 4, 5, 6, 7,
2017 8, 9, 10, 11, 12, 13, 14, 15,
2018 16, 17, 18, 19, 20, 21, 22, 23,
2019 24, 25, 26, 27, 28, 29, 30, 31
/*
 * get_eth_dcb_conf() - fill @eth_conf for DCB operation: VMDQ+DCB when
 * @dcb_mode == DCB_VT_ENABLED (pools mapped from the vlan_tags table),
 * plain DCB(+RSS) otherwise; finally set the PFC capability bit from
 * the pfc_en parameter (its declaration is a dropped line here).
 * NOTE(review): the token `ð_conf` below is a mojibake of
 * `&eth_conf` (HTML entity corruption in this listing) -- restore it
 * when editing. Braces, `else`, the `i` declaration and the return
 * statement are also dropped lines.
 */
2023 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2024 enum dcb_mode_enable dcb_mode,
2025 enum rte_eth_nb_tcs num_tcs,
2031 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2032 * given above, and the number of traffic classes available for use.
2034 if (dcb_mode == DCB_VT_ENABLED) {
2035 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2036 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2037 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2038 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2040 /* VMDQ+DCB RX and TX configurations */
2041 vmdq_rx_conf->enable_default_pool = 0;
2042 vmdq_rx_conf->default_pool = 0;
/* 4 TCs -> 32 pools, 8 TCs -> 16 pools (pools * TCs == queue budget). */
2043 vmdq_rx_conf->nb_queue_pools =
2044 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2045 vmdq_tx_conf->nb_queue_pools =
2046 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2048 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2049 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2050 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2051 vmdq_rx_conf->pool_map[i].pools =
2052 1 << (i % vmdq_rx_conf->nb_queue_pools);
/* Identity map: user priority i -> traffic class i. */
2054 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2055 vmdq_rx_conf->dcb_tc[i] = i;
2056 vmdq_tx_conf->dcb_tc[i] = i;
2059 /* set DCB mode of RX and TX of multiple queues */
2060 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2061 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2063 struct rte_eth_dcb_rx_conf *rx_conf =
2064 ð_conf->rx_adv_conf.dcb_rx_conf;
2065 struct rte_eth_dcb_tx_conf *tx_conf =
2066 ð_conf->tx_adv_conf.dcb_tx_conf;
2068 rx_conf->nb_tcs = num_tcs;
2069 tx_conf->nb_tcs = num_tcs;
/* Spread user priorities round-robin over the available TCs. */
2071 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2072 rx_conf->dcb_tc[i] = i % num_tcs;
2073 tx_conf->dcb_tc[i] = i % num_tcs;
2075 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2076 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2077 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
/* With PFC: advertise priority-group + priority-flow-control support;
 * without: priority groups only (selecting condition is a dropped
 * line). */
2081 eth_conf->dcb_capability_en =
2082 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2084 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * init_port_dcb_config() - switch port @pid into DCB mode: build the
 * DCB rte_eth_conf via get_eth_dcb_conf(), probe the device (configure
 * with 0 queues first, then read dev_info), derive the global
 * nb_rxq/nb_txq queue counts for the chosen mode, install VLAN filters
 * for every vlan_tags[] entry and mark the port's dcb_flag.
 * NOTE(review): dropped lines (braces, `retval`/`i` declarations,
 * error-return checks, `else` arms) in this listing -- verify against
 * the full source.
 */
2090 init_port_dcb_config(portid_t pid,
2091 enum dcb_mode_enable dcb_mode,
2092 enum rte_eth_nb_tcs num_tcs,
2095 struct rte_eth_conf port_conf;
2096 struct rte_port *rte_port;
2100 rte_port = &ports[pid];
2102 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2103 /* Enter DCB configuration status */
2106 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2107 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2110 port_conf.rxmode.hw_vlan_filter = 1;
2113 * Write the configuration into the device.
2114 * Set the numbers of RX & TX queues to 0, so
2115 * the RX & TX queues will not be setup.
2117 (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2119 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2121 /* If dev_info.vmdq_pool_base is greater than 0,
2122 * the queue id of vmdq pools is started after pf queues.
2124 if (dcb_mode == DCB_VT_ENABLED &&
2125 rte_port->dev_info.vmdq_pool_base > 0) {
2126 printf("VMDQ_DCB multi-queue mode is nonsensical"
2127 " for port %d.", pid);
2131 /* Assume the ports in testpmd have the same dcb capability
2132 * and has the same number of rxq and txq in dcb mode
2134 if (dcb_mode == DCB_VT_ENABLED) {
/* With VFs present the PF only owns dev_info.nb_rx/tx_queues. */
2135 if (rte_port->dev_info.max_vfs > 0) {
2136 nb_rxq = rte_port->dev_info.nb_rx_queues;
2137 nb_txq = rte_port->dev_info.nb_tx_queues;
2139 nb_rxq = rte_port->dev_info.max_rx_queues;
2140 nb_txq = rte_port->dev_info.max_tx_queues;
2143 /*if vt is disabled, use all pf queues */
2144 if (rte_port->dev_info.vmdq_pool_base == 0) {
2145 nb_rxq = rte_port->dev_info.max_rx_queues;
2146 nb_txq = rte_port->dev_info.max_tx_queues;
2148 nb_rxq = (queueid_t)num_tcs;
2149 nb_txq = (queueid_t)num_tcs;
2153 rx_free_thresh = 64;
2155 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2157 rxtx_port_config(rte_port);
2159 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
/* Accept all VLANs that get_eth_dcb_conf() mapped to pools. */
2160 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2161 rx_vft_set(pid, vlan_tags[i], 1);
2163 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2164 map_port_queue_stats_mapping_registers(pid, rte_port);
2166 rte_port->dcb_flag = 1;
/*
 * NOTE(review): fragment of port-array initialization (the enclosing
 * function's defining line is a dropped line in this listing --
 * presumably init_port(); confirm against the full source). Allocates
 * the zeroed, cache-aligned global `ports` array for RTE_MAX_ETHPORTS
 * entries and aborts on allocation failure.
 */
2174 /* Configuration of Ethernet ports. */
2175 ports = rte_zmalloc("testpmd: ports",
2176 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2177 RTE_CACHE_LINE_SIZE);
2178 if (ports == NULL) {
2179 rte_exit(EXIT_FAILURE,
2180 "rte_zmalloc(%d struct rte_port) failed\n",
/*
 * signal_handler() - on SIGINT/SIGTERM: tear down optional pdump and
 * latency-stats subsystems, then restore the default disposition and
 * re-raise the signal so the process exits with the conventional
 * signal status.
 * NOTE(review): dropped lines (return type, braces, the
 * rte_pdump_uninit() call and `#endif`s) in this listing -- verify
 * against the full source.
 */
2193 signal_handler(int signum)
2195 if (signum == SIGINT || signum == SIGTERM) {
2196 printf("\nSignal %d received, preparing to exit...\n",
2198 #ifdef RTE_LIBRTE_PDUMP
2199 /* uninitialize packet capture framework */
2202 #ifdef RTE_LIBRTE_LATENCY_STATS
2203 rte_latencystats_uninit();
2206 /* exit with the expected status */
2207 signal(signum, SIG_DFL);
2208 kill(getpid(), signum);
2213 main(int argc, char** argv)
2218 signal(SIGINT, signal_handler);
2219 signal(SIGTERM, signal_handler);
2221 diag = rte_eal_init(argc, argv);
2223 rte_panic("Cannot init EAL\n");
2225 #ifdef RTE_LIBRTE_PDUMP
2226 /* initialize packet capture framework */
2227 rte_pdump_init(NULL);
2230 nb_ports = (portid_t) rte_eth_dev_count();
2232 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2234 /* allocate port structures, and init them */
2237 set_def_fwd_config();
2239 rte_panic("Empty set of forwarding logical cores - check the "
2240 "core mask supplied in the command parameters\n");
2242 /* Bitrate/latency stats disabled by default */
2243 #ifdef RTE_LIBRTE_BITRATE
2244 bitrate_enabled = 0;
2246 #ifdef RTE_LIBRTE_LATENCY_STATS
2247 latencystats_enabled = 0;
2253 launch_args_parse(argc, argv);
2255 if (!nb_rxq && !nb_txq)
2256 printf("Warning: Either rx or tx queues should be non-zero\n");
2258 if (nb_rxq > 1 && nb_rxq > nb_txq)
2259 printf("Warning: nb_rxq=%d enables RSS configuration, "
2260 "but nb_txq=%d will prevent to fully test it.\n",
2264 if (start_port(RTE_PORT_ALL) != 0)
2265 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2267 /* set all ports to promiscuous mode by default */
2268 RTE_ETH_FOREACH_DEV(port_id)
2269 rte_eth_promiscuous_enable(port_id);
2271 /* Init metrics library */
2272 rte_metrics_init(rte_socket_id());
2274 #ifdef RTE_LIBRTE_LATENCY_STATS
2275 if (latencystats_enabled != 0) {
2276 int ret = rte_latencystats_init(1, NULL);
2278 printf("Warning: latencystats init()"
2279 " returned error %d\n", ret);
2280 printf("Latencystats running on lcore %d\n",
2281 latencystats_lcore_id);
2285 /* Setup bitrate stats */
2286 #ifdef RTE_LIBRTE_BITRATE
2287 if (bitrate_enabled != 0) {
2288 bitrate_data = rte_stats_bitrate_create();
2289 if (bitrate_data == NULL)
2290 rte_exit(EXIT_FAILURE,
2291 "Could not allocate bitrate data.\n");
2292 rte_stats_bitrate_reg(bitrate_data);
2296 #ifdef RTE_LIBRTE_CMDLINE
2297 if (strlen(cmdline_filename) != 0)
2298 cmdline_read_from_file(cmdline_filename);
2300 if (interactive == 1) {
2302 printf("Start automatic packet forwarding\n");
2303 start_packet_forwarding(0);
2313 printf("No commandline core given, start packet forwarding\n");
2314 start_packet_forwarding(0);
2315 printf("Press enter to exit\n");
2316 rc = read(0, &c, 1);