/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;
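
/*
 * Illustrative usage note (not part of the original code): NUMA behavior
 * is driven from the testpmd command line. A run that disables NUMA
 * dispatching and pins all memory to one socket could look like:
 *   testpmd -l 0-3 -n 4 -- -i --no-numa --socket-num=0
 * The --no-numa and --socket-num options set numa_support and socket_num
 * above; check the testpmd user guide of the DPDK release in use for the
 * exact option spellings.
 */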
/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
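
/*
 * Worked example (illustrative, not part of the original code): with
 * 2 probed ports and nb_rxq = 4, init_fwd_streams() below allocates
 * nb_ports * max(nb_rxq, nb_txq) streams, i.e. 2 * 4 = 8 forwarding
 * streams, one per (port, queue) pair.
 */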
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
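
/*
 * Illustrative note (not part of the original code): the "txonly" segment
 * layout is runtime-configurable. For example, the interactive command
 *   testpmd> set txpkts 64,64
 * (or the --txpkts=64,64 command-line option) requests two 64-byte
 * segments per transmitted packet, i.e. tx_pkt_nb_segs = 2 and
 * tx_pkt_seg_lengths = {64, 64}; consult the testpmd guide for the exact
 * syntax in your DPDK version.
 */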
/* Whether the current configuration is in DCB mode (0 means it is not). */
uint8_t dcb_config = 0;

/* Whether DCB is in testing state. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
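
/*
 * Illustrative note (not part of the original code): ring sizes can be
 * overridden at startup, e.g.
 *   testpmd ... -- --rxd=512 --txd=512
 * which replaces the 128/512 defaults above. They may also be changed at
 * runtime (with the ports stopped) via "port config all rxd 512"; option
 * and command names are as documented for testpmd and may vary by release.
 */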
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */
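
/*
 * Illustrative note (not part of the original code): both notifications
 * can be turned off from the command line (e.g. --no-lsc-interrupt and
 * --no-rmv-interrupt in recent testpmd versions) when polling-only
 * behavior is wanted; treat the exact option names as version-dependent.
 */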
/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats is enabled in the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
};
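
/*
 * Illustrative note (not part of the original code): several of these
 * defaults map to testpmd options, e.g. --disable-hw-vlan clears the
 * hw_vlan_* fields, and "--enable-jumbo --max-pkt-len=9000" sets
 * jumbo_frame and raises max_rx_pkt_len; option spellings are per the
 * testpmd documentation of the release in use.
 */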
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned int max_socket = 0;

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static void eth_event_callback(uint8_t port_id,
			       enum rte_eth_event_type type,
			       void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
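
/*
 * Worked example (illustrative, not part of the original code): launched
 * with -l 0-3 and lcore 0 as the master core, the loop above collects
 * fwd_lcores_cpuids = {1, 2, 3}, so nb_lcores = nb_cfg_lcores = 3 while
 * only one forwarding core is active by default.
 */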
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
/*
 * Check if the given socket ID is valid in NUMA mode;
 * return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
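
	/*
	 * Worked example (illustrative, not part of the original code):
	 * assuming the 2048-entry RX/TX descriptor maxima and the 250-mbuf
	 * default mempool cache defined in testpmd.h, a 4-lcore run needs
	 * roughly 2048 + 4 * 250 + 2048 + 512 mbufs per port before the
	 * RTE_MAX_ETHPORTS scaling above; the exact constants vary by
	 * release, so treat these numbers purely as an example.
	 */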
	if (numa_support) {
		uint8_t i;

		for (i = 0; i < max_socket; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
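	/*
	 * Worked example (illustrative, not part of the original code):
	 * 2 ports with nb_rxq = 4 and nb_txq = 2 give q = max(4, 2) = 4
	 * and nb_fwd_streams_new = 2 * 4 = 8.
	 */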
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
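	/*
	 * Example (illustrative, not part of the original code): if 900
	 * bursts of 32 packets and 100 bursts of 16 packets were recorded,
	 * total_burst = 1000, burst_stats[0]/pktnb_stats[0] = 900/32 and
	 * burst_stats[1]/pktnb_stats[1] = 100/16, printed below as
	 * "90% of 32 pkts + 10% of 16 pkts".
	 */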
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;
	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64
		       " RX-total:%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64
		       " TX-total:%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i],
			       stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the below do-while
				 * loop if rte_eth_rx_burst() always returns
				 * nonzero packets. So a timer is added to
				 * exit this loop after the 1 second timer
				 * expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down failed.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
			(port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							" No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							" No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		for (event_type = RTE_ETH_EVENT_UNKNOWN;
		     event_type < RTE_ETH_EVENT_MAX;
		     event_type++) {
			diag = rte_eth_dev_callback_register(pi,
							event_type,
							eth_event_callback,
							NULL);
			if (diag) {
				printf("Failed to setup event callback for event %d\n",
					event_type);
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
						"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name))
		return;

	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
			name, nb_ports);
	printf("Done\n");
	return;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9 s, and print them at the end */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}
static void
rmv_event_callback(void *arg)
{
	struct rte_eth_dev *dev;
	struct rte_devargs *da;
	char name[32] = "";
	uint8_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	da = dev->device->devargs;

	stop_port(port_id);
	close_port(port_id);
	if (da->type == RTE_DEVTYPE_VIRTUAL)
		snprintf(name, sizeof(name), "%s", da->virt.drv_name);
	else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
		rte_eal_pci_device_name(&da->pci.addr, name, sizeof(name));
	printf("removing device %s\n", name);
	rte_eal_dev_detach(name);
	dev->state = RTE_ETH_DEV_UNUSED;
}
/* This function is used by the interrupt thread */
static void
eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else {
		printf("\nPort %" PRIu8 ": %s event\n", port_id,
			event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
}
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}

const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
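		/*
		 * Example (illustrative, not part of the original code):
		 * with ETH_16_POOLS, the loop above creates 16 pool maps
		 * where VLAN tag vlan_tags[i] = i is routed to pool bitmask
		 * 1 << i, i.e. one VLAN per pool.
		 */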
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate stats disabled by default */
	bitrate_enabled = 0;

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}