4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
/* Global testpmd run-time configuration (extract; some original lines elided). */
93 uint16_t verbose_level = 0; /**< Silent by default. */
95 /* Run an interactive command line (on the master core)? 0 = batch mode. */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 char cmdline_filename[PATH_MAX] = {0};
101 * NUMA support configuration.
102 * When set, the NUMA support attempts to dispatch the allocation of the
103 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
104 * probed ports among the CPU sockets 0 and 1.
105 * Otherwise, all memory is allocated from CPU socket 0.
107 uint8_t numa_support = 1; /**< numa enabled by default */
110 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
113 uint8_t socket_num = UMA_NO_CONFIG;
116 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
121 * Record the Ethernet address of peer target ports to which packets are
123 * Must be instantiated with the ethernet addresses of peer traffic generator
126 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
127 portid_t nb_peer_eth_addrs = 0;
130 * Probed Target Environment.
132 struct rte_port *ports; /**< For all probed ethernet ports. */
133 portid_t nb_ports; /**< Number of probed ethernet ports. */
134 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
135 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
138 * Test Forwarding Configuration.
139 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
140 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
142 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
143 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
144 portid_t nb_cfg_ports; /**< Number of configured ports. */
145 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
147 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
148 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
150 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
151 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
154 * Forwarding engines.
/* NOTE(review): most entries of fwd_engines[] are elided in this extract;
 * only the IEEE1588 engine entry is visible. */
156 struct fwd_engine * fwd_engines[] = {
165 #ifdef RTE_LIBRTE_IEEE1588
166 &ieee1588_fwd_engine,
171 struct fwd_config cur_fwd_config;
172 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
173 uint32_t retry_enabled;
174 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
175 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
177 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
178 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
179 * specified on command-line. */
182 * Configuration of packet segments used by the "txonly" processing engine.
184 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
185 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
186 TXONLY_DEF_PACKET_LEN,
188 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
190 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
191 /**< Split policy for packets to TX. */
193 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
194 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
196 /* Whether the current configuration is in DCB mode; 0 means not in DCB mode. */
197 uint8_t dcb_config = 0;
199 /* Whether DCB is currently in testing status. */
200 uint8_t dcb_test = 0;
203 * Configurable number of RX/TX queues.
205 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
206 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
209 * Configurable number of RX/TX ring descriptors.
211 #define RTE_TEST_RX_DESC_DEFAULT 128
212 #define RTE_TEST_TX_DESC_DEFAULT 512
213 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
214 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
/* Sentinel: "value not set on command line, use PMD default". */
216 #define RTE_PMD_PARAM_UNSET -1
218 * Configurable values of RX and TX ring threshold registers.
221 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
223 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
225 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
230 * Configurable value of RX free threshold.
232 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
235 * Configurable value of RX drop enable.
237 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
240 * Configurable value of TX free threshold.
242 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
245 * Configurable value of TX RS bit threshold.
247 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
250 * Configurable value of TX queue flags.
252 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
255 * Receive Side Scaling (RSS) configuration.
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
260 * Port topology configuration
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
265 * Avoids to flush all the RX streams before starts forwarding.
267 uint8_t no_flush_rx = 0; /* flush by default */
270 * Avoids to check link status when starting/stopping a port.
272 uint8_t no_link_check = 0; /* check by default */
275 * Enable link status change notification
277 uint8_t lsc_interrupt = 1; /* enabled by default */
280 * Enable device removal notification.
282 uint8_t rmv_interrupt = 1; /* enabled by default */
285 * NIC bypass mode configuration options.
287 #ifdef RTE_NIC_BYPASS
289 /* The NIC bypass watchdog timeout. */
290 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
294 #ifdef RTE_LIBRTE_LATENCY_STATS
297 * Set when latency stats is enabled in the commandline
299 uint8_t latencystats_enabled;
302 * Lcore ID to serve latency statistics.
304 lcoreid_t latencystats_lcore_id = -1;
309 * Ethernet device configuration.
311 struct rte_eth_rxmode rx_mode = {
312 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
314 .header_split = 0, /**< Header Split disabled. */
315 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
316 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
317 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
318 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
319 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
320 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
/* Flow Director: disabled by default, with all-ones (match-everything) masks. */
323 struct rte_fdir_conf fdir_conf = {
324 .mode = RTE_FDIR_MODE_NONE,
325 .pballoc = RTE_FDIR_PBALLOC_64K,
326 .status = RTE_FDIR_REPORT_STATUS,
328 .vlan_tci_mask = 0x0,
330 .src_ip = 0xFFFFFFFF,
331 .dst_ip = 0xFFFFFFFF,
334 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
335 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
337 .src_port_mask = 0xFFFF,
338 .dst_port_mask = 0xFFFF,
339 .mac_addr_byte_mask = 0xFF,
340 .tunnel_type_mask = 1,
341 .tunnel_id_mask = 0xFFFFFFFF,
/* Volatile: written by the control thread, polled by forwarding lcores. */
346 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
348 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
349 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
351 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
352 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
354 uint16_t nb_tx_queue_stats_mappings = 0;
355 uint16_t nb_rx_queue_stats_mappings = 0;
/* Highest socket id seen + 1; set in set_default_fwd_lcores_config(). */
357 unsigned max_socket = 0;
359 #ifdef RTE_LIBRTE_BITRATE
360 /* Bitrate statistics */
361 struct rte_stats_bitrates *bitrate_data;
362 lcoreid_t bitrate_lcore_id;
363 uint8_t bitrate_enabled;
366 /* Forward function declarations */
367 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
368 static void check_all_ports_link_status(uint32_t port_mask);
369 static void eth_event_callback(uint8_t port_id,
370 enum rte_eth_event_type type,
374 * Check if all the ports are started.
375 * If yes, return positive value. If not, return zero.
377 static int all_ports_started(void);
380 * Setup default configuration.
/*
 * Builds the default forwarding-lcore list: scans all RTE lcores, records
 * the highest socket id into max_socket (aborting if it would exceed
 * RTE_MAX_NUMA_NODES), and collects every enabled lcore except the master
 * into fwd_lcores_cpuids[]. Sets nb_lcores and nb_cfg_lcores from the count.
 * NOTE(review): this extract elides several original lines (local variable
 * declarations, the `continue` statements after the two skip-checks, and
 * closing braces); code below is kept byte-identical to the extract.
 */
383 set_default_fwd_lcores_config(void)
387 unsigned int sock_num;
390 for (i = 0; i < RTE_MAX_LCORE; i++) {
391 sock_num = rte_lcore_to_socket_id(i) + 1;
392 if (sock_num > max_socket) {
393 if (sock_num > RTE_MAX_NUMA_NODES)
394 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
395 max_socket = sock_num;
397 if (!rte_lcore_is_enabled(i))
399 if (i == rte_get_master_lcore())
401 fwd_lcores_cpuids[nb_lc++] = i;
403 nb_lcores = (lcoreid_t) nb_lc;
404 nb_cfg_lcores = nb_lcores;
/*
 * Initializes peer_eth_addrs[]: each peer gets a locally-administered MAC
 * whose last byte is the port index. (Extract elides decls/braces.)
 */
409 set_def_peer_eth_addrs(void)
413 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
414 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
415 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default port configuration: forward on every probed port, identity-mapped
 * (fwd_ports_ids[i] == i). (Extract elides decls/braces.)
 */
420 set_default_fwd_ports_config(void)
424 for (pt_id = 0; pt_id < nb_ports; pt_id++)
425 fwd_ports_ids[pt_id] = pt_id;
427 nb_cfg_ports = nb_ports;
428 nb_fwd_ports = nb_ports;
/* Resets lcores, peer addresses and ports to their default configuration. */
432 set_def_fwd_config(void)
434 set_default_fwd_lcores_config();
435 set_def_peer_eth_addrs();
436 set_default_fwd_ports_config();
440 * Configuration initialisation done once at init time.
/*
 * Creates the mbuf pool for one socket. Pool name is derived from socket_id.
 * Allocation strategy (per build options — extract elides the #else/#endif
 * structure, so branch boundaries below are approximate):
 *  - XENVIRT builds: try rte_mempool_gntalloc_create() first, fall back on
 *    failure;
 *  - anonymous-memory path: rte_mempool_create_empty() +
 *    rte_mempool_populate_anon(), then pktmbuf init callbacks;
 *  - default: rte_pktmbuf_pool_create().
 * Exits the process if no pool could be created; dumps the pool when verbose.
 */
443 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
444 unsigned int socket_id)
446 char pool_name[RTE_MEMPOOL_NAMESIZE];
447 struct rte_mempool *rte_mp = NULL;
450 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
451 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
454 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
455 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
457 #ifdef RTE_LIBRTE_PMD_XENVIRT
458 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
459 (unsigned) mb_mempool_cache,
460 sizeof(struct rte_pktmbuf_pool_private),
461 rte_pktmbuf_pool_init, NULL,
462 rte_pktmbuf_init, NULL,
466 /* if the former XEN allocation failed fall back to normal allocation */
467 if (rte_mp == NULL) {
469 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
470 mb_size, (unsigned) mb_mempool_cache,
471 sizeof(struct rte_pktmbuf_pool_private),
476 if (rte_mempool_populate_anon(rte_mp) == 0) {
477 rte_mempool_free(rte_mp);
481 rte_pktmbuf_pool_init(rte_mp, NULL);
482 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
484 /* wrapper to rte_mempool_create() */
485 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
486 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
491 if (rte_mp == NULL) {
492 rte_exit(EXIT_FAILURE,
493 "Creation of mbuf pool for socket %u failed: %s\n",
494 socket_id, rte_strerror(rte_errno));
495 } else if (verbose_level > 0) {
496 rte_mempool_dump(stdout, rte_mp);
501 * Check given socket id is valid or not with NUMA mode,
502 * if valid, return 0, else return -1
/* Warns only once (static flag) when NUMA support is on and the id is out
 * of range. NOTE(review): the return statements are elided in this extract. */
505 check_socket_id(const unsigned int socket_id)
507 static int warning_once = 0;
509 if (socket_id >= max_socket) {
510 if (!warning_once && numa_support)
511 printf("Warning: NUMA should be configured manually by"
512 " using --port-numa-config and"
513 " --ring-numa-config parameters along with"
/*
 * init_config() body (function header elided from this extract).
 * One-time initialisation: allocates per-lcore forwarding contexts, reads
 * device info for every port, tallies ports per NUMA socket, creates the
 * mbuf pool(s) — per socket when NUMA is enabled, otherwise a single pool —
 * binds each forwarding lcore to a pool on its own socket (fallback to
 * socket 0), and sets up the forwarding streams.
 */
525 struct rte_port *port;
526 struct rte_mempool *mbp;
527 unsigned int nb_mbuf_per_pool;
529 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
531 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
/* NUMA_NO_CONFIG marks "not set by --port-numa-config / --ring-numa-config". */
534 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
535 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
536 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
539 /* Configuration of logical cores. */
540 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
541 sizeof(struct fwd_lcore *) * nb_lcores,
542 RTE_CACHE_LINE_SIZE);
543 if (fwd_lcores == NULL) {
544 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
545 "failed\n", nb_lcores);
547 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
548 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
549 sizeof(struct fwd_lcore),
550 RTE_CACHE_LINE_SIZE);
551 if (fwd_lcores[lc_id] == NULL) {
552 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
555 fwd_lcores[lc_id]->cpuid_idx = lc_id;
558 RTE_ETH_FOREACH_DEV(pid) {
560 rte_eth_dev_info_get(pid, &port->dev_info);
/* Prefer an explicit --port-numa-config socket; otherwise use the device's. */
563 if (port_numa[pid] != NUMA_NO_CONFIG)
564 port_per_socket[port_numa[pid]]++;
566 uint32_t socket_id = rte_eth_dev_socket_id(pid);
568 /* if socket_id is invalid, set to 0 */
569 if (check_socket_id(socket_id) < 0)
571 port_per_socket[socket_id]++;
575 /* set flag to initialize port/queue */
576 port->need_reconfig = 1;
577 port->need_reconfig_queues = 1;
581 * Create pools of mbuf.
582 * If NUMA support is disabled, create a single pool of mbuf in
583 * socket 0 memory by default.
584 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
586 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
587 * nb_txd can be configured at run time.
589 if (param_total_num_mbufs)
590 nb_mbuf_per_pool = param_total_num_mbufs;
592 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
593 (nb_lcores * mb_mempool_cache) +
594 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
595 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
601 for (i = 0; i < max_socket; i++)
602 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
604 if (socket_num == UMA_NO_CONFIG)
605 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
607 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
614 * Records which Mbuf pool to use by each logical core, if needed.
616 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
617 mbp = mbuf_pool_find(
618 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
/* No pool on this lcore's socket: fall back to socket 0's pool. */
621 mbp = mbuf_pool_find(0);
622 fwd_lcores[lc_id]->mbp = mbp;
625 /* Configuration of packet forwarding streams. */
626 if (init_fwd_streams() < 0)
627 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * Re-reads device info for one port (e.g. after hotplug attach) and flags
 * it for full port/queue reconfiguration on next start.
 */
634 reconfig(portid_t new_port_id, unsigned socket_id)
636 struct rte_port *port;
638 /* Reconfiguration of Ethernet ports. */
639 port = &ports[new_port_id];
640 rte_eth_dev_info_get(new_port_id, &port->dev_info);
642 /* set flag to initialize port/queue */
643 port->need_reconfig = 1;
644 port->need_reconfig_queues = 1;
645 port->socket_id = socket_id;
/*
 * (Re)creates the forwarding-stream array.
 * Per port: validates nb_rxq/nb_txq against the device's max queue counts
 * (prints and, presumably, returns an error — the failure returns are elided
 * in this extract), and resolves the port's socket id (explicit
 * --port-numa-config value, else the device socket, else 0/UMA socket).
 * Then sizes the array as nb_ports * max(nb_rxq, nb_txq); if the size
 * changed, frees any previous streams and zmallocs a fresh set.
 */
652 init_fwd_streams(void)
655 struct rte_port *port;
656 streamid_t sm_id, nb_fwd_streams_new;
659 /* set socket id according to numa or not */
660 RTE_ETH_FOREACH_DEV(pid) {
662 if (nb_rxq > port->dev_info.max_rx_queues) {
663 printf("Fail: nb_rxq(%d) is greater than "
664 "max_rx_queues(%d)\n", nb_rxq,
665 port->dev_info.max_rx_queues);
668 if (nb_txq > port->dev_info.max_tx_queues) {
669 printf("Fail: nb_txq(%d) is greater than "
670 "max_tx_queues(%d)\n", nb_txq,
671 port->dev_info.max_tx_queues);
675 if (port_numa[pid] != NUMA_NO_CONFIG)
676 port->socket_id = port_numa[pid];
678 port->socket_id = rte_eth_dev_socket_id(pid);
680 /* if socket_id is invalid, set to 0 */
681 if (check_socket_id(port->socket_id) < 0)
686 if (socket_num == UMA_NO_CONFIG)
689 port->socket_id = socket_num;
/* A stream exists per (port, queue); q covers the larger of RX/TX counts. */
693 q = RTE_MAX(nb_rxq, nb_txq);
695 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
698 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
699 if (nb_fwd_streams_new == nb_fwd_streams)
702 if (fwd_streams != NULL) {
703 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
704 if (fwd_streams[sm_id] == NULL)
706 rte_free(fwd_streams[sm_id]);
707 fwd_streams[sm_id] = NULL;
709 rte_free(fwd_streams);
714 nb_fwd_streams = nb_fwd_streams_new;
715 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
716 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
717 if (fwd_streams == NULL)
718 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
719 "failed\n", nb_fwd_streams);
721 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
722 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
723 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
724 if (fwd_streams[sm_id] == NULL)
725 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
732 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Prints a summary of the burst-size distribution recorded in pbs:
 * total number of bursts plus the two most frequent burst sizes, each with
 * its share of the total as a percentage. rx_tx is the "RX"/"TX" label.
 * NOTE(review): the branch that tracks the second-highest count appears
 * elided in this extract (only the highest-count shift is visible).
 */
734 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
736 unsigned int total_burst;
737 unsigned int nb_burst;
738 unsigned int burst_stats[3];
739 uint16_t pktnb_stats[3];
741 int burst_percent[3];
744 * First compute the total number of packet bursts and the
745 * two highest numbers of bursts of the same number of packets.
748 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
749 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
750 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
751 nb_burst = pbs->pkt_burst_spread[nb_pkt];
754 total_burst += nb_burst;
755 if (nb_burst > burst_stats[0]) {
756 burst_stats[1] = burst_stats[0];
757 pktnb_stats[1] = pktnb_stats[0];
758 burst_stats[0] = nb_burst;
759 pktnb_stats[0] = nb_pkt;
762 if (total_burst == 0)
764 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
765 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
766 burst_percent[0], (int) pktnb_stats[0]);
767 if (burst_stats[0] == total_burst) {
771 if (burst_stats[0] + burst_stats[1] == total_burst) {
772 printf(" + %d%% of %d pkts]\n",
773 100 - burst_percent[0], pktnb_stats[1]);
776 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
777 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
778 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
779 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
782 printf(" + %d%% of %d pkts + %d%% of others]\n",
783 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
785 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Prints forwarding statistics for one port: RX/TX packet counts, drops,
 * checksum errors (csum engine only), RX errors/no-mbuf counts, optional
 * burst-size histograms, and — when queue-stats mapping is enabled —
 * per-stats-register queue counters. Two layouts are used depending on
 * whether queue-stats mapping is active for the port.
 */
788 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
790 struct rte_port *port;
793 static const char *fwd_stats_border = "----------------------";
795 port = &ports[port_id];
796 printf("\n %s Forward statistics for port %-2d %s\n",
797 fwd_stats_border, port_id, fwd_stats_border);
799 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
800 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
802 stats->ipackets, stats->imissed,
803 (uint64_t) (stats->ipackets + stats->imissed),
805 if (cur_fwd_eng == &csum_fwd_engine)
806 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
807 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
808 if ((stats->ierrors + stats->rx_nombuf) > 0) {
809 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
810 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
813 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
815 stats->opackets, port->tx_dropped,
816 (uint64_t) (stats->opackets + port->tx_dropped));
819 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
821 stats->ipackets, stats->imissed,
822 (uint64_t) (stats->ipackets + stats->imissed));
824 if (cur_fwd_eng == &csum_fwd_engine)
825 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
826 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
827 if ((stats->ierrors + stats->rx_nombuf) > 0) {
828 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
829 printf(" RX-nombufs: %14"PRIu64"\n",
833 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
835 stats->opackets, port->tx_dropped,
836 (uint64_t) (stats->opackets + port->tx_dropped));
839 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
841 pkt_burst_stats_display("RX",
842 &port->rx_stream->rx_burst_stats);
844 pkt_burst_stats_display("TX",
845 &port->tx_stream->tx_burst_stats);
848 if (port->rx_queue_stats_mapping_enabled) {
850 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
851 printf(" Stats reg %2d RX-packets:%14"PRIu64
852 " RX-errors:%14"PRIu64
853 " RX-bytes:%14"PRIu64"\n",
854 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
858 if (port->tx_queue_stats_mapping_enabled) {
859 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
860 printf(" Stats reg %2d TX-packets:%14"PRIu64
861 " TX-bytes:%14"PRIu64"\n",
862 i, stats->q_opackets[i], stats->q_obytes[i]);
866 printf(" %s--------------------------------%s\n",
867 fwd_stats_border, fwd_stats_border);
/*
 * Prints per-stream forwarding statistics (RX/TX packets, drops, checksum
 * errors for the csum engine, optional burst histograms). Streams with no
 * activity are skipped (the early return after the zero-check is elided
 * in this extract).
 */
871 fwd_stream_stats_display(streamid_t stream_id)
873 struct fwd_stream *fs;
874 static const char *fwd_top_stats_border = "-------";
876 fs = fwd_streams[stream_id];
877 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
878 (fs->fwd_dropped == 0))
880 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
881 "TX Port=%2d/Queue=%2d %s\n",
882 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
883 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
884 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
885 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
887 /* if checksum mode */
888 if (cur_fwd_eng == &csum_fwd_engine) {
889 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
890 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
893 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
894 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
895 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drains stale packets from every RX queue of every forwarding port before
 * a run starts. Performs two full passes (j loop) with a 10 ms pause in
 * between; each queue is polled until rte_eth_rx_burst() returns 0 or a
 * 1-second TSC-based timeout expires (guards against a queue that never
 * goes empty).
 */
900 flush_fwd_rx_queues(void)
902 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
909 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
910 uint64_t timer_period;
912 /* convert to number of cycles */
913 timer_period = rte_get_timer_hz(); /* 1 second timeout */
915 for (j = 0; j < 2; j++) {
916 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
917 for (rxq = 0; rxq < nb_rxq; rxq++) {
918 port_id = fwd_ports_ids[rxp];
920 * testpmd can stuck in the below do while loop
921 * if rte_eth_rx_burst() always returns nonzero
922 * packets. So timer is added to exit this loop
923 * after 1sec timer expiry.
925 prev_tsc = rte_rdtsc();
927 nb_rx = rte_eth_rx_burst(port_id, rxq,
928 pkts_burst, MAX_PKT_BURST);
929 for (i = 0; i < nb_rx; i++)
930 rte_pktmbuf_free(pkts_burst[i]);
932 cur_tsc = rte_rdtsc();
933 diff_tsc = cur_tsc - prev_tsc;
934 timer_tsc += diff_tsc;
935 } while ((nb_rx > 0) &&
936 (timer_tsc < timer_period));
940 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main forwarding loop for one logical core: repeatedly runs pkt_fwd on
 * each stream assigned to this lcore until fc->stopped is set by the
 * control thread. When enabled (and this is the designated lcore), also
 * triggers the periodic bitrate calculation (once per second, TSC-based)
 * and latency-stats update from inside the loop.
 */
945 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
947 struct fwd_stream **fsm;
950 #ifdef RTE_LIBRTE_BITRATE
951 uint64_t tics_per_1sec;
953 uint64_t tics_current;
954 uint8_t idx_port, cnt_ports;
956 cnt_ports = rte_eth_dev_count();
957 tics_datum = rte_rdtsc();
958 tics_per_1sec = rte_get_timer_hz();
960 fsm = &fwd_streams[fc->stream_idx];
961 nb_fs = fc->stream_nb;
963 for (sm_id = 0; sm_id < nb_fs; sm_id++)
964 (*pkt_fwd)(fsm[sm_id]);
965 #ifdef RTE_LIBRTE_BITRATE
966 if (bitrate_enabled != 0 &&
967 bitrate_lcore_id == rte_lcore_id()) {
968 tics_current = rte_rdtsc();
969 if (tics_current - tics_datum >= tics_per_1sec) {
970 /* Periodic bitrate calculation */
972 idx_port < cnt_ports;
974 rte_stats_bitrate_calc(bitrate_data,
976 tics_datum = tics_current;
980 #ifdef RTE_LIBRTE_LATENCY_STATS
981 if (latencystats_enabled != 0 &&
982 latencystats_lcore_id == rte_lcore_id())
983 rte_latencystats_update();
986 } while (! fc->stopped);
/* lcore entry point: runs the current engine's packet_fwd on this core. */
990 start_pkt_forward_on_core(void *fwd_arg)
992 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
993 cur_fwd_config.fwd_eng->packet_fwd);
998 * Run the TXONLY packet forwarding engine to send a single burst of packets.
999 * Used to start communication flows in network loopback test configurations.
/* Copies the lcore context and pre-sets stopped=1 so run_pkt_fwd_on_lcore()
 * executes exactly one pass over the streams, without disturbing the real
 * context's stopped flag. */
1002 run_one_txonly_burst_on_core(void *fwd_arg)
1004 struct fwd_lcore *fwd_lc;
1005 struct fwd_lcore tmp_lcore;
1007 fwd_lc = (struct fwd_lcore *) fwd_arg;
1008 tmp_lcore = *fwd_lc;
1009 tmp_lcore.stopped = 1;
1010 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1015 * Launch packet forwarding:
1016 * - Setup per-port forwarding context.
1017 * - launch logical cores with their forwarding configuration.
/* Calls the engine's optional port_fwd_begin hook on each forwarding port,
 * then remote-launches pkt_fwd_on_lcore on every forwarding lcore. In
 * interactive mode the current (master) lcore is skipped so the CLI keeps
 * running. Launch failures are reported but (as visible here) not fatal. */
1020 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1022 port_fwd_begin_t port_fwd_begin;
1027 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1028 if (port_fwd_begin != NULL) {
1029 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1030 (*port_fwd_begin)(fwd_ports_ids[i]);
1032 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1033 lc_id = fwd_lcores_cpuids[i];
1034 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1035 fwd_lcores[i]->stopped = 0;
1036 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1037 fwd_lcores[i], lc_id);
1039 printf("launch lcore %u failed - diag=%d\n",
1046 * Launch packet forwarding configuration.
/*
 * Starts a forwarding run:
 *  1. Validates queue counts against the selected engine (rxonly needs RX
 *     queues, txonly needs TX queues, everything else needs both).
 *  2. Requires all ports started and no run already in progress.
 *  3. Re-initializes streams; in DCB mode checks every forwarding port is
 *     DCB-configured and more than one forwarding core is available.
 *  4. Flushes stale RX packets (unless suppressed), snapshots baseline
 *     port stats and zeroes per-stream counters.
 *  5. If with_tx_first > 0, runs that many single txonly bursts first
 *     (to prime loopback flows), then launches the configured engine.
 */
1049 start_packet_forwarding(int with_tx_first)
1051 port_fwd_begin_t port_fwd_begin;
1052 port_fwd_end_t port_fwd_end;
1053 struct rte_port *port;
1058 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1059 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1061 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1062 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1064 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1065 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1066 (!nb_rxq || !nb_txq))
1067 rte_exit(EXIT_FAILURE,
1068 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1069 cur_fwd_eng->fwd_mode_name);
1071 if (all_ports_started() == 0) {
1072 printf("Not all ports were started\n");
1075 if (test_done == 0) {
1076 printf("Packet forwarding already started\n");
1080 if (init_fwd_streams() < 0) {
1081 printf("Fail from init_fwd_streams()\n");
1086 for (i = 0; i < nb_fwd_ports; i++) {
1087 pt_id = fwd_ports_ids[i];
1088 port = &ports[pt_id];
1089 if (!port->dcb_flag) {
1090 printf("In DCB mode, all forwarding ports must "
1091 "be configured in this mode.\n");
1095 if (nb_fwd_lcores == 1) {
1096 printf("In DCB mode,the nb forwarding cores "
1097 "should be larger than 1.\n");
1104 flush_fwd_rx_queues();
1107 pkt_fwd_config_display(&cur_fwd_config);
1108 rxtx_config_display();
/* Snapshot current HW stats so the end-of-run report shows deltas only. */
1110 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1111 pt_id = fwd_ports_ids[i];
1112 port = &ports[pt_id];
1113 rte_eth_stats_get(pt_id, &port->stats);
1114 port->tx_dropped = 0;
1116 map_port_queue_stats_mapping_registers(pt_id, port);
1118 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1119 fwd_streams[sm_id]->rx_packets = 0;
1120 fwd_streams[sm_id]->tx_packets = 0;
1121 fwd_streams[sm_id]->fwd_dropped = 0;
1122 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1123 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1125 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1126 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1127 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1128 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1129 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1131 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1132 fwd_streams[sm_id]->core_cycles = 0;
1135 if (with_tx_first) {
1136 port_fwd_begin = tx_only_engine.port_fwd_begin;
1137 if (port_fwd_begin != NULL) {
1138 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1139 (*port_fwd_begin)(fwd_ports_ids[i]);
1141 while (with_tx_first--) {
1142 launch_packet_forwarding(
1143 run_one_txonly_burst_on_core);
1144 rte_eal_mp_wait_lcore();
1146 port_fwd_end = tx_only_engine.port_fwd_end;
1147 if (port_fwd_end != NULL) {
1148 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1149 (*port_fwd_end)(fwd_ports_ids[i]);
1152 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stops a forwarding run and prints the final statistics report:
 *  1. Sets stopped=1 on every forwarding lcore and waits for them.
 *  2. Calls the engine's optional port_fwd_end hook per port.
 *  3. Folds per-stream counters (drops, bad IP/L4 checksums, core cycles)
 *     back into the owning ports, displaying per-stream stats when there
 *     are more streams than ports.
 *  4. Per port: reads HW stats, subtracts the baseline snapshot taken at
 *     start (zeroing the snapshot), accumulates grand totals, and prints
 *     the per-port report.
 *  5. Prints accumulated totals and, when built with core-cycle recording,
 *     average CPU cycles per received packet.
 */
1156 stop_packet_forwarding(void)
1158 struct rte_eth_stats stats;
1159 struct rte_port *port;
1160 port_fwd_end_t port_fwd_end;
1165 uint64_t total_recv;
1166 uint64_t total_xmit;
1167 uint64_t total_rx_dropped;
1168 uint64_t total_tx_dropped;
1169 uint64_t total_rx_nombuf;
1170 uint64_t tx_dropped;
1171 uint64_t rx_bad_ip_csum;
1172 uint64_t rx_bad_l4_csum;
1173 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1174 uint64_t fwd_cycles;
1176 static const char *acc_stats_border = "+++++++++++++++";
1179 printf("Packet forwarding not started\n");
1182 printf("Telling cores to stop...");
1183 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1184 fwd_lcores[lc_id]->stopped = 1;
1185 printf("\nWaiting for lcores to finish...\n");
1186 rte_eal_mp_wait_lcore();
1187 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1188 if (port_fwd_end != NULL) {
1189 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1190 pt_id = fwd_ports_ids[i];
1191 (*port_fwd_end)(pt_id);
1194 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1197 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1198 if (cur_fwd_config.nb_fwd_streams >
1199 cur_fwd_config.nb_fwd_ports) {
1200 fwd_stream_stats_display(sm_id);
1201 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1202 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1204 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1206 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1209 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1210 tx_dropped = (uint64_t) (tx_dropped +
1211 fwd_streams[sm_id]->fwd_dropped);
1212 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1215 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1216 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1217 fwd_streams[sm_id]->rx_bad_ip_csum);
1218 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1222 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1223 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1224 fwd_streams[sm_id]->rx_bad_l4_csum);
1225 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1228 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1229 fwd_cycles = (uint64_t) (fwd_cycles +
1230 fwd_streams[sm_id]->core_cycles);
1235 total_rx_dropped = 0;
1236 total_tx_dropped = 0;
1237 total_rx_nombuf = 0;
1238 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1239 pt_id = fwd_ports_ids[i];
1241 port = &ports[pt_id];
1242 rte_eth_stats_get(pt_id, &stats);
/* Subtract the snapshot taken at start so only this run's deltas remain. */
1243 stats.ipackets -= port->stats.ipackets;
1244 port->stats.ipackets = 0;
1245 stats.opackets -= port->stats.opackets;
1246 port->stats.opackets = 0;
1247 stats.ibytes -= port->stats.ibytes;
1248 port->stats.ibytes = 0;
1249 stats.obytes -= port->stats.obytes;
1250 port->stats.obytes = 0;
1251 stats.imissed -= port->stats.imissed;
1252 port->stats.imissed = 0;
1253 stats.oerrors -= port->stats.oerrors;
1254 port->stats.oerrors = 0;
1255 stats.rx_nombuf -= port->stats.rx_nombuf;
1256 port->stats.rx_nombuf = 0;
1258 total_recv += stats.ipackets;
1259 total_xmit += stats.opackets;
1260 total_rx_dropped += stats.imissed;
1261 total_tx_dropped += port->tx_dropped;
1262 total_rx_nombuf += stats.rx_nombuf;
1264 fwd_port_stats_display(pt_id, &stats);
1266 printf("\n %s Accumulated forward statistics for all ports"
1268 acc_stats_border, acc_stats_border);
1269 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1271 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1273 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1274 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1275 if (total_rx_nombuf > 0)
1276 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1277 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1279 acc_stats_border, acc_stats_border);
1280 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1282 printf("\n CPU cycles/packet=%u (total cycles="
1283 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1284 (unsigned int)(fwd_cycles / total_recv),
1285 fwd_cycles, total_recv);
1287 printf("\nDone.\n");
1292 dev_set_link_up(portid_t pid)
1294 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1295 printf("\nSet link up fail.\n");
1299 dev_set_link_down(portid_t pid)
1301 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1302 printf("\nSet link down fail.\n");
1306 all_ports_started(void)
1309 struct rte_port *port;
1311 RTE_ETH_FOREACH_DEV(pi) {
1313 /* Check if there is a port which is not started */
1314 if ((port->port_status != RTE_PORT_STARTED) &&
1315 (port->slave_flag == 0))
1319 /* No port is not started */
1324 all_ports_stopped(void)
1327 struct rte_port *port;
1329 RTE_ETH_FOREACH_DEV(pi) {
1331 if ((port->port_status != RTE_PORT_STOPPED) &&
1332 (port->slave_flag == 0))
1340 port_is_started(portid_t port_id)
1342 if (port_id_is_invalid(port_id, ENABLED_WARN))
1345 if (ports[port_id].port_status != RTE_PORT_STARTED)
1352 port_is_closed(portid_t port_id)
1354 if (port_id_is_invalid(port_id, ENABLED_WARN))
1357 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * start_port(): (re)configure and start port @pid, or every probed port
 * when @pid == RTE_PORT_ALL.  Port state transitions are guarded with
 * rte_atomic16_cmpset() (STOPPED -> HANDLING -> STARTED) so concurrent
 * state changes are detected rather than raced.
 *
 * NOTE(review): this chunk appears to have source lines elided
 * (unbalanced braces, missing continue/return paths) -- compare against
 * upstream app/test-pmd/testpmd.c before modifying.
 */
1364 start_port(portid_t pid)
1366 int diag, need_check_link_status = -1;
1369 struct rte_port *port;
1370 struct ether_addr mac_addr;
1371 enum rte_eth_event_type event_type;
/* reject bogus port ids early (warns because of ENABLED_WARN) */
1373 if (port_id_is_invalid(pid, ENABLED_WARN))
1378 RTE_ETH_FOREACH_DEV(pi) {
/* skip ports not selected by @pid */
1379 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1382 need_check_link_status = 0;
/* claim the port: STOPPED -> HANDLING; failure means it is busy/started */
1384 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1385 RTE_PORT_HANDLING) == 0) {
1386 printf("Port %d is now not stopped\n", pi);
/* full device reconfiguration requested (e.g. after queue count change) */
1390 if (port->need_reconfig > 0) {
1391 port->need_reconfig = 0;
1393 printf("Configuring Port %d (socket %u)\n", pi,
1395 /* configure port */
1396 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* configuration failed: roll the state back and retry on next start */
1399 if (rte_atomic16_cmpset(&(port->port_status),
1400 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1401 printf("Port %d can not be set back "
1402 "to stopped\n", pi);
1403 printf("Fail to configure port %d\n", pi);
1404 /* try to reconfigure port next time */
1405 port->need_reconfig = 1;
/* queue-level (re)configuration requested */
1409 if (port->need_reconfig_queues > 0) {
1410 port->need_reconfig_queues = 0;
1411 /* setup tx queues */
1412 for (qi = 0; qi < nb_txq; qi++) {
/* honour a per-port TX ring NUMA override when NUMA support is on */
1413 if ((numa_support) &&
1414 (txring_numa[pi] != NUMA_NO_CONFIG))
1415 diag = rte_eth_tx_queue_setup(pi, qi,
1416 nb_txd,txring_numa[pi],
1419 diag = rte_eth_tx_queue_setup(pi, qi,
1420 nb_txd,port->socket_id,
1426 /* Fail to setup tx queue, return */
1427 if (rte_atomic16_cmpset(&(port->port_status),
1429 RTE_PORT_STOPPED) == 0)
1430 printf("Port %d can not be set back "
1431 "to stopped\n", pi);
1432 printf("Fail to configure port %d tx queues\n", pi);
1433 /* try to reconfigure queues next time */
1434 port->need_reconfig_queues = 1;
1437 /* setup rx queues */
1438 for (qi = 0; qi < nb_rxq; qi++) {
/* RX queues need an mbuf pool on the requested socket */
1439 if ((numa_support) &&
1440 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1441 struct rte_mempool * mp =
1442 mbuf_pool_find(rxring_numa[pi]);
1444 printf("Failed to setup RX queue:"
1445 "No mempool allocation"
1446 " on the socket %d\n",
1451 diag = rte_eth_rx_queue_setup(pi, qi,
1452 nb_rxd,rxring_numa[pi],
1453 &(port->rx_conf),mp);
/* fallback: use the mbuf pool of the port's own socket */
1455 struct rte_mempool *mp =
1456 mbuf_pool_find(port->socket_id);
1458 printf("Failed to setup RX queue:"
1459 "No mempool allocation"
1460 " on the socket %d\n",
1464 diag = rte_eth_rx_queue_setup(pi, qi,
1465 nb_rxd,port->socket_id,
1466 &(port->rx_conf), mp);
1471 /* Fail to setup rx queue, return */
1472 if (rte_atomic16_cmpset(&(port->port_status),
1474 RTE_PORT_STOPPED) == 0)
1475 printf("Port %d can not be set back "
1476 "to stopped\n", pi);
1477 printf("Fail to configure port %d rx queues\n", pi);
1478 /* try to reconfigure queues next time */
1479 port->need_reconfig_queues = 1;
/* register eth_event_callback for every event type on this port */
1484 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1485 event_type < RTE_ETH_EVENT_MAX;
1487 diag = rte_eth_dev_callback_register(pi,
/* NOTE(review): "even" below is a typo for "event" in this message */
1492 printf("Failed to setup even callback for event %d\n",
/* start the device itself */
1499 if (rte_eth_dev_start(pi) < 0) {
1500 printf("Fail to start port %d\n", pi);
1502 /* Fail to setup rx queue, return */
1503 if (rte_atomic16_cmpset(&(port->port_status),
1504 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1505 printf("Port %d can not be set back to "
/* success: HANDLING -> STARTED */
1510 if (rte_atomic16_cmpset(&(port->port_status),
1511 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1512 printf("Port %d can not be set into started\n", pi);
/* print the MAC of the port that just came up */
1514 rte_eth_macaddr_get(pi, &mac_addr);
1515 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1516 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1517 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1518 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1520 /* at least one port started, need checking link status */
1521 need_check_link_status = 1;
/* need_check_link_status: -1 = no port matched, 0 = matched but none
 * started (still busy), 1 = at least one started */
1524 if (need_check_link_status == 1 && !no_link_check)
1525 check_all_ports_link_status(RTE_PORT_ALL);
1526 else if (need_check_link_status == 0)
1527 printf("Please stop the ports first\n");
/*
 * stop_port(): stop port @pid, or every probed port when
 * @pid == RTE_PORT_ALL.  Ports still referenced by the forwarding
 * configuration or enslaved to a bonded device are refused.
 * State transition is STARTED -> HANDLING -> STOPPED via cmpset.
 *
 * NOTE(review): some source lines appear elided in this chunk
 * (continue statements, closing braces) -- verify against upstream.
 */
1534 stop_port(portid_t pid)
1537 struct rte_port *port;
1538 int need_check_link_status = 0;
1545 if (port_id_is_invalid(pid, ENABLED_WARN))
1548 printf("Stopping ports...\n")...
1550 RTE_ETH_FOREACH_DEV(pi) {
/* skip ports not selected by @pid */
1551 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
/* refuse to stop a port that an active forwarding run still uses */
1554 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1555 printf("Please remove port %d from forwarding configuration.\n", pi);
/* refuse to stop a bonding slave; the master controls it */
1559 if (port_is_bonding_slave(pi)) {
1560 printf("Please remove port %d from bonded device.\n", pi);
/* claim the port: STARTED -> HANDLING; skip if it was not started */
1565 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1566 RTE_PORT_HANDLING) == 0)
1569 rte_eth_dev_stop(pi);
/* release the port: HANDLING -> STOPPED */
1571 if (rte_atomic16_cmpset(&(port->port_status),
1572 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1573 printf("Port %d can not be set into stopped\n", pi);
1574 need_check_link_status = 1;
/* re-check link state once at least one port was actually stopped */
1576 if (need_check_link_status && !no_link_check)
1577 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * close_port(): close port @pid (or all ports with RTE_PORT_ALL),
 * flushing any rte_flow rules first.  Only a STOPPED port may be
 * closed; state goes STOPPED -> HANDLING -> CLOSED via cmpset.
 *
 * NOTE(review): some source lines appear elided in this chunk
 * (continue statements, closing braces) -- verify against upstream.
 */
1583 close_port(portid_t pid)
1586 struct rte_port *port;
1588 if (port_id_is_invalid(pid, ENABLED_WARN))
1591 printf("Closing ports...\n")...
1593 RTE_ETH_FOREACH_DEV(pi) {
/* skip ports not selected by @pid */
1594 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
/* refuse to close a port still used by an active forwarding run */
1597 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1598 printf("Please remove port %d from forwarding configuration.\n", pi);
/* refuse to close a bonding slave; the master controls it */
1602 if (port_is_bonding_slave(pi)) {
1603 printf("Please remove port %d from bonded device.\n", pi);
/* cmpset(CLOSED, CLOSED) == 1 detects "already closed" without racing */
1608 if (rte_atomic16_cmpset(&(port->port_status),
1609 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1610 printf("Port %d is already closed\n", pi);
/* claim the port: STOPPED -> HANDLING; reject if it is not stopped */
1614 if (rte_atomic16_cmpset(&(port->port_status),
1615 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1616 printf("Port %d is now not stopped\n", pi);
/* drop all installed rte_flow rules before closing the device */
1620 if (port->flow_list)
1621 port_flow_flush(pi);
1622 rte_eth_dev_close(pi);
/* release the port: HANDLING -> CLOSED */
1624 if (rte_atomic16_cmpset(&(port->port_status),
1625 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1626 printf("Port %d cannot be set to closed\n", pi);
/*
 * attach_port(): hot-plug a new device identified by @identifier
 * (PCI address or virtual-device name), configure it on its NUMA
 * socket, enable promiscuous mode and account it as a new port in
 * the RTE_PORT_STOPPED state.
 *
 * NOTE(review): some source lines appear elided in this chunk
 * (early returns, braces) -- verify against upstream.
 */
1633 attach_port(char *identifier)
1636 unsigned int socket_id;
1638 printf("Attaching a new port...\n")...
1640 if (identifier == NULL) {
1641 printf("Invalid parameters are specified\n");
/* EAL hot-plug: fills pi with the new port id on success */
1645 if (rte_eth_dev_attach(identifier, &pi))
1648 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1649 /* if socket_id is invalid, set to 0 */
1650 if (check_socket_id(socket_id) < 0)
/* allocate fwd configuration/streams for the new port */
1652 reconfig(pi, socket_id);
1653 rte_eth_promiscuous_enable(pi);
1655 nb_ports = rte_eth_dev_count();
1657 ports[pi].port_status = RTE_PORT_STOPPED;
1659 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * detach_port(): hot-unplug device @port_id.  The port must already be
 * closed; any remaining rte_flow rules are flushed before the EAL
 * detach.  Updates the global port count on success.
 *
 * NOTE(review): some source lines appear elided in this chunk
 * (early returns, braces) -- verify against upstream.
 */
1664 detach_port(uint8_t port_id)
1666 char name[RTE_ETH_NAME_MAX_LEN];
1668 printf("Detaching a port...\n")...
/* only a closed port may be detached */
1670 if (!port_is_closed(port_id)) {
1671 printf("Please close port first\n");
1675 if (ports[port_id].flow_list)
1676 port_flow_flush(port_id);
/* EAL hot-unplug: fills name with the detached device name */
1678 if (rte_eth_dev_detach(port_id, name))
1681 nb_ports = rte_eth_dev_count();
1683 printf("Port '%s' is detached. Now total ports is %d\n",
/*
 * NOTE(review): fragment of the testpmd shutdown path (presumably
 * pmd_test_exit()) -- the function header and several lines are not
 * visible in this chunk; verify against upstream before editing.
 * Stops forwarding, then closes every probed port before exiting.
 */
1695 stop_packet_forwarding();
1697 if (ports != NULL) {
1699 RTE_ETH_FOREACH_DEV(pt_id) {
1700 printf("\nShutting down port %d...\n", pt_id);
1706 printf("\nBye...\n")...
/* Descriptor for a built-in (non-cmdline) test command: a name mapped
 * to a parameterless handler.  PMD_TEST_CMD_NB counts the entries of
 * the pmd_test_menu[] table (table itself not visible in this chunk).
 * NOTE(review): the struct's closing brace and the menu array appear
 * elided here -- verify against upstream. */
1709 typedef void (*cmd_func_t)(void);
1710 struct pmd_test_command {
1711 const char *cmd_name;
1712 cmd_func_t cmd_func;
1715 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1717 /* Check the link status of all ports in up to 9s, and print them finally */
1719 check_all_ports_link_status(uint32_t port_mask)
1721 #define CHECK_INTERVAL 100 /* 100ms */
1722 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1723 uint8_t portid, count, all_ports_up, print_flag = 0;
1724 struct rte_eth_link link;
1726 printf("Checking link statuses...\n");
1728 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1730 RTE_ETH_FOREACH_DEV(portid) {
1731 if ((port_mask & (1 << portid)) == 0)
1733 memset(&link, 0, sizeof(link));
1734 rte_eth_link_get_nowait(portid, &link);
1735 /* print link status if flag set */
1736 if (print_flag == 1) {
1737 if (link.link_status)
1738 printf("Port %d Link Up - speed %u "
1739 "Mbps - %s\n", (uint8_t)portid,
1740 (unsigned)link.link_speed,
1741 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1742 ("full-duplex") : ("half-duplex\n"));
1744 printf("Port %d Link Down\n",
1748 /* clear all_ports_up flag if any link down */
1749 if (link.link_status == ETH_LINK_DOWN) {
1754 /* after finally printing all link status, get out */
1755 if (print_flag == 1)
1758 if (all_ports_up == 0) {
1760 rte_delay_ms(CHECK_INTERVAL);
1763 /* set the print_flag if all ports up or timeout */
1764 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * rmv_event_callback(): deferred handler (scheduled via rte_eal_alarm_set
 * from eth_event_callback) for a device-removal interrupt.  Closes the
 * port, resolves the underlying device name from its devargs and detaches
 * it from the EAL, then marks the ethdev slot unused.
 *
 * NOTE(review): the function header ("static void") and some lines
 * (e.g. the name[] buffer declaration, braces) appear elided here --
 * verify against upstream.
 */
1774 rmv_event_callback(void *arg)
1776 struct rte_eth_dev *dev;
1777 struct rte_devargs *da;
/* port id was smuggled through the void* alarm argument */
1779 uint8_t port_id = (intptr_t)arg;
1781 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1782 dev = &rte_eth_devices[port_id];
1783 da = dev->device->devargs;
1786 close_port(port_id);
/* derive the EAL device name: virtual driver name or PCI address */
1787 if (da->type == RTE_DEVTYPE_VIRTUAL)
1788 snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1789 else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1790 rte_pci_device_name(&da->pci.addr, name, sizeof(name));
1791 printf("removing device %s\n", name);
1792 rte_eal_dev_detach(name);
1793 dev->state = RTE_ETH_DEV_UNUSED;
1796 /* This function is used by the interrupt thread */
/*
 * eth_event_callback(): generic ethdev event handler registered for every
 * event type in start_port().  Logs the event by name; for INTR_RMV it
 * defers the actual device teardown to rmv_event_callback via a 100 ms
 * EAL alarm (the callback must not detach the device from interrupt
 * context).
 *
 * NOTE(review): the return-type line, switch header and closing braces
 * appear elided in this chunk -- verify against upstream.
 */
1798 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
/* human-readable names indexed by rte_eth_event_type */
1800 static const char * const event_desc[] = {
1801 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1802 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1803 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1804 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1805 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1806 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1807 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1808 [RTE_ETH_EVENT_MAX] = NULL,
1811 RTE_SET_USED(param);
/* guard the event_desc[] lookup against out-of-range event types */
1813 if (type >= RTE_ETH_EVENT_MAX) {
1814 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1815 port_id, __func__, type);
1818 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1824 case RTE_ETH_EVENT_INTR_RMV:
/* defer teardown by 100 ms so it runs outside the interrupt thread */
1825 if (rte_eal_alarm_set(100000,
1826 rmv_event_callback, (void *)(intptr_t)port_id))
1827 fprintf(stderr, "Could not set up deferred device removal\n");
1835 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1839 uint8_t mapping_found = 0;
1841 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1842 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1843 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1844 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1845 tx_queue_stats_mappings[i].queue_id,
1846 tx_queue_stats_mappings[i].stats_counter_id);
1853 port->tx_queue_stats_mapping_enabled = 1;
1858 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1862 uint8_t mapping_found = 0;
1864 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1865 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1866 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1867 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1868 rx_queue_stats_mappings[i].queue_id,
1869 rx_queue_stats_mappings[i].stats_counter_id);
1876 port->rx_queue_stats_mapping_enabled = 1;
/*
 * map_port_queue_stats_mapping_registers(): apply both TX and RX
 * queue-stats mappings to @pi.  -ENOTSUP from the driver is tolerated
 * (the feature is simply disabled for the port); any other error is
 * fatal and aborts testpmd via rte_exit().
 *
 * NOTE(review): the "else if (diag != 0)" branches and closing braces
 * appear elided in this chunk -- verify against upstream.
 */
1881 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1885 diag = set_tx_queue_stats_mapping_registers(pi, port);
/* driver does not support TX mapping: disable the feature, keep going */
1887 if (diag == -ENOTSUP) {
1888 port->tx_queue_stats_mapping_enabled = 0;
1889 printf("TX queue stats mapping not supported port id=%d\n", pi);
/* any other failure is fatal */
1892 rte_exit(EXIT_FAILURE,
1893 "set_tx_queue_stats_mapping_registers "
1894 "failed for port id=%d diag=%d\n",
1898 diag = set_rx_queue_stats_mapping_registers(pi, port);
/* driver does not support RX mapping: disable the feature, keep going */
1900 if (diag == -ENOTSUP) {
1901 port->rx_queue_stats_mapping_enabled = 0;
1902 printf("RX queue stats mapping not supported port id=%d\n", pi);
/* any other failure is fatal */
1905 rte_exit(EXIT_FAILURE,
1906 "set_rx_queue_stats_mapping_registers "
1907 "failed for port id=%d diag=%d\n",
1913 rxtx_port_config(struct rte_port *port)
1915 port->rx_conf = port->dev_info.default_rxconf;
1916 port->tx_conf = port->dev_info.default_txconf;
1918 /* Check if any RX/TX parameters have been passed */
1919 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1920 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1922 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1923 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1925 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1926 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1928 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1929 port->rx_conf.rx_free_thresh = rx_free_thresh;
1931 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1932 port->rx_conf.rx_drop_en = rx_drop_en;
1934 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1935 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1937 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1938 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1940 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1941 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1943 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1944 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1946 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1947 port->tx_conf.tx_free_thresh = tx_free_thresh;
1949 if (txq_flags != RTE_PMD_PARAM_UNSET)
1950 port->tx_conf.txq_flags = txq_flags;
/*
 * init_port_config(): build the default rte_eth_conf for every probed
 * port: RX mode, flow-director config, RSS (enabled when rss_hf != 0
 * and the port is not in DCB mode), queue thresholds, stats mappings,
 * and LSC/RMV interrupt flags when the device advertises support.
 *
 * NOTE(review): several source lines appear elided in this chunk
 * (port = &ports[pid], else branches, #endif, closing braces) --
 * verify against upstream.
 */
1954 init_port_config(void)
1957 struct rte_port *port;
1959 RTE_ETH_FOREACH_DEV(pid) {
1961 port->dev_conf.rxmode = rx_mode;
1962 port->dev_conf.fdir_conf = fdir_conf;
/* multi-queue: request RSS with the configured hash functions ... */
1964 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1965 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
/* ... or disable RSS entirely (presumably the single-queue branch;
 * the surrounding if/else lines are not visible here) */
1967 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1968 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* pick the RX multi-queue mode unless the port is in DCB mode */
1971 if (port->dcb_flag == 0) {
1972 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1973 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1975 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1978 rxtx_port_config(port);
1980 rte_eth_macaddr_get(pid, &port->eth_addr);
1982 map_port_queue_stats_mapping_registers(pid, port);
1983 #ifdef RTE_NIC_BYPASS
1984 rte_eth_dev_bypass_init(pid);
/* enable link-state-change interrupts only when the device supports them */
1987 if (lsc_interrupt &&
1988 (rte_eth_devices[pid].data->dev_flags &
1989 RTE_ETH_DEV_INTR_LSC))
1990 port->dev_conf.intr_conf.lsc = 1;
/* likewise for device-removal interrupts */
1991 if (rmv_interrupt &&
1992 (rte_eth_devices[pid].data->dev_flags &
1993 RTE_ETH_DEV_INTR_RMV))
1994 port->dev_conf.intr_conf.rmv = 1;
1998 void set_port_slave_flag(portid_t slave_pid)
2000 struct rte_port *port;
2002 port = &ports[slave_pid];
2003 port->slave_flag = 1;
2006 void clear_port_slave_flag(portid_t slave_pid)
2008 struct rte_port *port;
2010 port = &ports[slave_pid];
2011 port->slave_flag = 0;
2014 uint8_t port_is_bonding_slave(portid_t slave_pid)
2016 struct rte_port *port;
2018 port = &ports[slave_pid];
2019 return port->slave_flag;
/* VLAN IDs used by the DCB+VT configuration: one tag per VMDq pool,
 * consumed by get_eth_dcb_conf() and init_port_dcb_config(). */
const uint16_t vlan_tags[] = {
	0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
/*
 * get_eth_dcb_conf(): fill @eth_conf for DCB operation.
 * DCB_VT_ENABLED selects VMDq+DCB (one pool per vlan_tags[] entry);
 * otherwise plain DCB(+RSS) is configured.  @num_tcs selects 4 or 8
 * traffic classes; the trailing capability flags depend on pfc_en
 * (priority flow control), whose parameter line is not visible here.
 *
 * NOTE(review): lines appear elided (return type, pfc_en parameter,
 * else keywords, closing braces, "return 0"), and "ð_conf" below is
 * mojibake for "&eth_conf" (HTML-entity corruption of "&eth") --
 * verify against upstream before editing.
 */
2030 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2031 enum dcb_mode_enable dcb_mode,
2032 enum rte_eth_nb_tcs num_tcs,
2038 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2039 * given above, and the number of traffic classes available for use.
2041 if (dcb_mode == DCB_VT_ENABLED) {
2042 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2043 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2044 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2045 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2047 /* VMDQ+DCB RX and TX configurations */
2048 vmdq_rx_conf->enable_default_pool = 0;
2049 vmdq_rx_conf->default_pool = 0;
/* 4 TCs -> 32 pools, 8 TCs -> 16 pools (pools * TCs = 128 queues) */
2050 vmdq_rx_conf->nb_queue_pools =
2051 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2052 vmdq_tx_conf->nb_queue_pools =
2053 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* map one VLAN tag to each pool, round-robin over the pools */
2055 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2056 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2057 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2058 vmdq_rx_conf->pool_map[i].pools =
2059 1 << (i % vmdq_rx_conf->nb_queue_pools);
/* identity map user priority -> traffic class */
2061 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2062 vmdq_rx_conf->dcb_tc[i] = i;
2063 vmdq_tx_conf->dcb_tc[i] = i;
2066 /* set DCB mode of RX and TX of multiple queues */
2067 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2068 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
/* non-VT branch: plain DCB with RSS on the RX side */
2070 struct rte_eth_dcb_rx_conf *rx_conf =
2071 ð_conf->rx_adv_conf.dcb_rx_conf;
2072 struct rte_eth_dcb_tx_conf *tx_conf =
2073 ð_conf->tx_adv_conf.dcb_tx_conf;
2075 rx_conf->nb_tcs = num_tcs;
2076 tx_conf->nb_tcs = num_tcs;
/* spread the 8 user priorities across the available TCs */
2078 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2079 rx_conf->dcb_tc[i] = i % num_tcs;
2080 tx_conf->dcb_tc[i] = i % num_tcs;
2082 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2083 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2084 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
/* capability flags: PFC branch (presumably gated on pfc_en) ... */
2088 eth_conf->dcb_capability_en =
2089 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
/* ... or priority-groups only */
2091 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * init_port_dcb_config(): switch port @pid into DCB mode.  Builds a DCB
 * rte_eth_conf via get_eth_dcb_conf(), applies it with 0 queues (queues
 * are set up later in start_port()), derives the global nb_rxq/nb_txq
 * from the device capabilities, installs the VLAN filters for all
 * vlan_tags[] and finally marks the port with dcb_flag.
 *
 * NOTE(review): lines appear elided (return type, pfc_en parameter,
 * retval checks, early returns, braces) -- verify against upstream.
 */
2097 init_port_dcb_config(portid_t pid,
2098 enum dcb_mode_enable dcb_mode,
2099 enum rte_eth_nb_tcs num_tcs,
2102 struct rte_eth_conf port_conf;
2103 struct rte_port *rte_port;
2107 rte_port = &ports[pid];
2109 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2110 /* Enter DCB configuration status */
2113 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2114 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
/* DCB relies on VLAN classification, so force HW VLAN filtering on */
2117 port_conf.rxmode.hw_vlan_filter = 1;
2120 * Write the configuration into the device.
2121 * Set the numbers of RX & TX queues to 0, so
2122 * the RX & TX queues will not be setup.
2124 (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2126 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2128 /* If dev_info.vmdq_pool_base is greater than 0,
2129 * the queue id of vmdq pools is started after pf queues.
2131 if (dcb_mode == DCB_VT_ENABLED &&
2132 rte_port->dev_info.vmdq_pool_base > 0) {
2133 printf("VMDQ_DCB multi-queue mode is nonsensical"
2134 " for port %d.", pid);
2138 /* Assume the ports in testpmd have the same dcb capability
2139 * and has the same number of rxq and txq in dcb mode
2141 if (dcb_mode == DCB_VT_ENABLED) {
/* with VFs present only the configured queue counts are usable */
2142 if (rte_port->dev_info.max_vfs > 0) {
2143 nb_rxq = rte_port->dev_info.nb_rx_queues;
2144 nb_txq = rte_port->dev_info.nb_tx_queues;
2146 nb_rxq = rte_port->dev_info.max_rx_queues;
2147 nb_txq = rte_port->dev_info.max_tx_queues;
2150 /*if vt is disabled, use all pf queues */
2151 if (rte_port->dev_info.vmdq_pool_base == 0) {
2152 nb_rxq = rte_port->dev_info.max_rx_queues;
2153 nb_txq = rte_port->dev_info.max_tx_queues;
/* otherwise one queue per traffic class */
2155 nb_rxq = (queueid_t)num_tcs;
2156 nb_txq = (queueid_t)num_tcs;
2160 rx_free_thresh = 64;
/* make the DCB config the port's persistent device configuration */
2162 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2164 rxtx_port_config(rte_port);
2166 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
/* accept every VLAN id used by the DCB/VT pool mapping */
2167 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2168 rx_vft_set(pid, vlan_tags[i], 1);
2170 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2171 map_port_queue_stats_mapping_registers(pid, rte_port);
2173 rte_port->dcb_flag = 1;
/*
 * NOTE(review): fragment of the port-array initialization (presumably
 * init_port()) -- the function header is not visible in this chunk.
 * Allocates the zeroed global ports[] array for RTE_MAX_ETHPORTS
 * entries; allocation failure aborts testpmd.
 */
2181 /* Configuration of Ethernet ports. */
2182 ports = rte_zmalloc("testpmd: ports",
2183 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2184 RTE_CACHE_LINE_SIZE);
2185 if (ports == NULL) {
2186 rte_exit(EXIT_FAILURE,
2187 "rte_zmalloc(%d struct rte_port) failed\n",
/*
 * signal_handler(): SIGINT/SIGTERM handler.  Uninitializes the optional
 * pdump and latency-stats subsystems, then re-raises the signal with the
 * default disposition so the process exits with the conventional status.
 *
 * NOTE(review): lines appear elided here (return type, the pdump
 * uninit call and #endif lines, and what is presumably a cleanup call
 * before the re-raise) -- verify against upstream.
 */
2200 signal_handler(int signum)
2202 if (signum == SIGINT || signum == SIGTERM) {
2203 printf("\nSignal %d received, preparing to exit...\n",
2205 #ifdef RTE_LIBRTE_PDUMP
2206 /* uninitialize packet capture framework */
2209 #ifdef RTE_LIBRTE_LATENCY_STATS
2210 rte_latencystats_uninit();
2213 /* exit with the expected status */
/* SIG_DFL + kill(getpid()) re-raises the signal so the shell sees the
 * process as killed by SIGINT/SIGTERM rather than a plain exit */
2214 signal(signum, SIG_DFL);
2215 kill(getpid(), signum);
2220 main(int argc, char** argv)
2225 signal(SIGINT, signal_handler);
2226 signal(SIGTERM, signal_handler);
2228 diag = rte_eal_init(argc, argv);
2230 rte_panic("Cannot init EAL\n");
2232 #ifdef RTE_LIBRTE_PDUMP
2233 /* initialize packet capture framework */
2234 rte_pdump_init(NULL);
2237 nb_ports = (portid_t) rte_eth_dev_count();
2239 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2241 /* allocate port structures, and init them */
2244 set_def_fwd_config();
2246 rte_panic("Empty set of forwarding logical cores - check the "
2247 "core mask supplied in the command parameters\n");
2249 /* Bitrate/latency stats disabled by default */
2250 #ifdef RTE_LIBRTE_BITRATE
2251 bitrate_enabled = 0;
2253 #ifdef RTE_LIBRTE_LATENCY_STATS
2254 latencystats_enabled = 0;
2260 launch_args_parse(argc, argv);
2262 if (!nb_rxq && !nb_txq)
2263 printf("Warning: Either rx or tx queues should be non-zero\n");
2265 if (nb_rxq > 1 && nb_rxq > nb_txq)
2266 printf("Warning: nb_rxq=%d enables RSS configuration, "
2267 "but nb_txq=%d will prevent to fully test it.\n",
2271 if (start_port(RTE_PORT_ALL) != 0)
2272 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2274 /* set all ports to promiscuous mode by default */
2275 RTE_ETH_FOREACH_DEV(port_id)
2276 rte_eth_promiscuous_enable(port_id);
2278 /* Init metrics library */
2279 rte_metrics_init(rte_socket_id());
2281 #ifdef RTE_LIBRTE_LATENCY_STATS
2282 if (latencystats_enabled != 0) {
2283 int ret = rte_latencystats_init(1, NULL);
2285 printf("Warning: latencystats init()"
2286 " returned error %d\n", ret);
2287 printf("Latencystats running on lcore %d\n",
2288 latencystats_lcore_id);
2292 /* Setup bitrate stats */
2293 #ifdef RTE_LIBRTE_BITRATE
2294 if (bitrate_enabled != 0) {
2295 bitrate_data = rte_stats_bitrate_create();
2296 if (bitrate_data == NULL)
2297 rte_exit(EXIT_FAILURE,
2298 "Could not allocate bitrate data.\n");
2299 rte_stats_bitrate_reg(bitrate_data);
2303 #ifdef RTE_LIBRTE_CMDLINE
2304 if (strlen(cmdline_filename) != 0)
2305 cmdline_read_from_file(cmdline_filename);
2307 if (interactive == 1) {
2309 printf("Start automatic packet forwarding\n");
2310 start_packet_forwarding(0);
2320 printf("No commandline core given, start packet forwarding\n");
2321 start_packet_forwarding(0);
2322 printf("Press enter to exit\n");
2323 rc = read(0, &c, 1);