4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
/*
 * NOTE(review): this numbered listing is sampled — many original source
 * lines (comment delimiters, braces, some initializer lines) are absent.
 * Globals below are testpmd's run-time configuration knobs, set from the
 * command line / interactive shell and read by the forwarding paths.
 */
93 uint16_t verbose_level = 0; /**< Silent by default. */
95 /* use master core for command line ? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
100 * NUMA support configuration.
101 * When set, the NUMA support attempts to dispatch the allocation of the
102 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
103 * probed ports among the CPU sockets 0 and 1.
104 * Otherwise, all memory is allocated from CPU socket 0.
106 uint8_t numa_support = 1; /**< numa enabled by default */
109 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
112 uint8_t socket_num = UMA_NO_CONFIG;
115 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
120 * Record the Ethernet address of peer target ports to which packets are
122 * Must be instantiated with the ethernet addresses of peer traffic generator
125 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
126 portid_t nb_peer_eth_addrs = 0;
129 * Probed Target Environment.
131 struct rte_port *ports; /**< For all probed ethernet ports. */
132 portid_t nb_ports; /**< Number of probed ethernet ports. */
133 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
134 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
137 * Test Forwarding Configuration.
138 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
139 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
141 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
142 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
143 portid_t nb_cfg_ports; /**< Number of configured ports. */
144 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
146 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
147 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
149 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
150 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
153 * Forwarding engines.
/* NOTE(review): most engine entries of fwd_engines[] are not shown in this
 * listing; only the IEEE1588 conditional entry is visible. */
155 struct fwd_engine * fwd_engines[] = {
164 #ifdef RTE_LIBRTE_IEEE1588
165 &ieee1588_fwd_engine,
170 struct fwd_config cur_fwd_config;
171 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
172 uint32_t retry_enabled;
173 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
174 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
176 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
177 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
178 * specified on command-line. */
181 * Configuration of packet segments used by the "txonly" processing engine.
183 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
184 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
185 TXONLY_DEF_PACKET_LEN,
187 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
189 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
190 /**< Split policy for packets to TX. */
192 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
193 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
195 /* current configuration is in DCB or not, 0 means it is not in DCB mode */
196 uint8_t dcb_config = 0;
198 /* Whether the dcb is in testing status */
199 uint8_t dcb_test = 0;
202 * Configurable number of RX/TX queues.
204 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
205 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
208 * Configurable number of RX/TX ring descriptors.
210 #define RTE_TEST_RX_DESC_DEFAULT 128
211 #define RTE_TEST_TX_DESC_DEFAULT 512
212 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
213 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
/* Sentinel: "leave the PMD's own default in place" for the knobs below. */
215 #define RTE_PMD_PARAM_UNSET -1
217 * Configurable values of RX and TX ring threshold registers.
220 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
221 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
224 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
229 * Configurable value of RX free threshold.
231 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
234 * Configurable value of RX drop enable.
236 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
239 * Configurable value of TX free threshold.
241 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
244 * Configurable value of TX RS bit threshold.
246 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
249 * Configurable value of TX queue flags.
251 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
254 * Receive Side Scaling (RSS) configuration.
256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
259 * Port topology configuration
261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
264 * Avoids to flush all the RX streams before starts forwarding.
266 uint8_t no_flush_rx = 0; /* flush by default */
269 * Avoids to check link status when starting/stopping a port.
271 uint8_t no_link_check = 0; /* check by default */
274 * Enable link status change notification
276 uint8_t lsc_interrupt = 1; /* enabled by default */
279 * Enable device removal notification.
281 uint8_t rmv_interrupt = 1; /* enabled by default */
284 * NIC bypass mode configuration options.
286 #ifdef RTE_NIC_BYPASS
288 /* The NIC bypass watchdog timeout. */
289 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
293 #ifdef RTE_LIBRTE_LATENCY_STATS
296 * Set when latency stats is enabled in the commandline
298 uint8_t latencystats_enabled;
301 * Lcore ID to serve latency statistics.
303 lcoreid_t latencystats_lcore_id = -1;
308 * Ethernet device configuration.
310 struct rte_eth_rxmode rx_mode = {
311 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
313 .header_split = 0, /**< Header Split disabled. */
314 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
315 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
316 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
317 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
318 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
319 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
/* Flow Director disabled by default; masks below match "all bits" except
 * VLAN TCI. NOTE(review): nested initializer structure (.mask, .ipv4_mask,
 * .ipv6_mask) is partially elided in this listing. */
322 struct rte_fdir_conf fdir_conf = {
323 .mode = RTE_FDIR_MODE_NONE,
324 .pballoc = RTE_FDIR_PBALLOC_64K,
325 .status = RTE_FDIR_REPORT_STATUS,
327 .vlan_tci_mask = 0x0,
329 .src_ip = 0xFFFFFFFF,
330 .dst_ip = 0xFFFFFFFF,
333 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
334 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
336 .src_port_mask = 0xFFFF,
337 .dst_port_mask = 0xFFFF,
338 .mac_addr_byte_mask = 0xFF,
339 .tunnel_type_mask = 1,
340 .tunnel_id_mask = 0xFFFFFFFF,
345 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
347 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
348 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
350 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
351 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
353 uint16_t nb_tx_queue_stats_mappings = 0;
354 uint16_t nb_rx_queue_stats_mappings = 0;
/* Highest socket id seen + 1; maintained by set_default_fwd_lcores_config(). */
356 unsigned max_socket = 0;
358 /* Bitrate statistics */
359 struct rte_stats_bitrates *bitrate_data;
361 /* Forward function declarations */
362 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
363 static void check_all_ports_link_status(uint32_t port_mask);
364 static void eth_event_callback(uint8_t port_id,
365 enum rte_eth_event_type type,
369 * Check if all the ports are started.
370 * If yes, return positive value. If not, return zero.
372 static int all_ports_started(void);
375 * Setup default configuration.
/*
 * Build the default list of forwarding lcores: every enabled lcore except
 * the master lcore. Also tracks max_socket (highest socket id + 1) and
 * aborts if the socket count exceeds RTE_MAX_NUMA_NODES.
 * NOTE(review): `continue;` statements and braces at original lines
 * 391-395 are not shown in this sampled listing.
 */
378 set_default_fwd_lcores_config(void)
382 unsigned int sock_num;
385 for (i = 0; i < RTE_MAX_LCORE; i++) {
386 sock_num = rte_lcore_to_socket_id(i) + 1;
387 if (sock_num > max_socket) {
388 if (sock_num > RTE_MAX_NUMA_NODES)
389 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
390 max_socket = sock_num;
392 if (!rte_lcore_is_enabled(i))
394 if (i == rte_get_master_lcore())
396 fwd_lcores_cpuids[nb_lc++] = i;
398 nb_lcores = (lcoreid_t) nb_lc;
399 nb_cfg_lcores = nb_lcores;
/*
 * Initialize default peer MAC addresses: locally-administered OUI byte in
 * addr_bytes[0] and the port index in the last byte, one per possible port.
 */
404 set_def_peer_eth_addrs(void)
408 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
409 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
410 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default forwarding-port configuration: use all probed ports, in probe
 * order (identity mapping port index -> port id).
 */
415 set_default_fwd_ports_config(void)
419 for (pt_id = 0; pt_id < nb_ports; pt_id++)
420 fwd_ports_ids[pt_id] = pt_id;
422 nb_cfg_ports = nb_ports;
423 nb_fwd_ports = nb_ports;
/* Apply all default forwarding configuration: lcores, peer MACs, ports. */
427 set_def_fwd_config(void)
429 set_default_fwd_lcores_config();
430 set_def_peer_eth_addrs();
431 set_default_fwd_ports_config();
435 * Configuration initialisation done once at init time.
/*
 * Create the mbuf pool for one CPU socket.
 * Allocation strategy, in order:
 *   1. XEN grant-alloc pool (only when RTE_LIBRTE_PMD_XENVIRT is defined);
 *   2. anonymous-memory pool when requested (creates an empty pool, then
 *      populates it with anonymous mappings);
 *   3. the normal rte_pktmbuf_pool_create() wrapper.
 * Exits the process on failure; dumps the pool when verbose.
 * NOTE(review): the mp_anon_enable condition and several braces/#endif
 * lines are absent from this sampled listing.
 */
438 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
439 unsigned int socket_id)
441 char pool_name[RTE_MEMPOOL_NAMESIZE];
442 struct rte_mempool *rte_mp = NULL;
445 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
446 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
449 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
450 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
452 #ifdef RTE_LIBRTE_PMD_XENVIRT
453 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
454 (unsigned) mb_mempool_cache,
455 sizeof(struct rte_pktmbuf_pool_private),
456 rte_pktmbuf_pool_init, NULL,
457 rte_pktmbuf_init, NULL,
461 /* if the former XEN allocation failed fall back to normal allocation */
462 if (rte_mp == NULL) {
464 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
465 mb_size, (unsigned) mb_mempool_cache,
466 sizeof(struct rte_pktmbuf_pool_private),
471 if (rte_mempool_populate_anon(rte_mp) == 0) {
/* Anonymous populate failed (returned 0 objects): free the empty pool. */
472 rte_mempool_free(rte_mp);
476 rte_pktmbuf_pool_init(rte_mp, NULL);
477 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
479 /* wrapper to rte_mempool_create() */
480 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
481 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
486 if (rte_mp == NULL) {
487 rte_exit(EXIT_FAILURE,
488 "Creation of mbuf pool for socket %u failed: %s\n",
489 socket_id, rte_strerror(rte_errno));
490 } else if (verbose_level > 0) {
491 rte_mempool_dump(stdout, rte_mp);
496 * Check given socket id is valid or not with NUMA mode,
497 * if valid, return 0, else return -1
/*
 * Warns once (when NUMA support is on) if a socket id is out of the range
 * of sockets discovered at init.
 * NOTE(review): the rest of the warning text, warning_once update and the
 * return statements are not shown in this sampled listing.
 */
500 check_socket_id(const unsigned int socket_id)
502 static int warning_once = 0;
504 if (socket_id >= max_socket) {
506 printf("Warning: NUMA should be configured manually by"
507 " using --port-numa-config and"
508 " --ring-numa-config parameters along with"
505 if (!warning_once && numa_support)
/*
 * NOTE(review): the function signature is missing from this sampled
 * listing; from the body this is presumably testpmd's init_config() —
 * one-time initialization of lcores, mbuf pools, per-port state and
 * forwarding streams. TODO: confirm against the full source.
 */
520 struct rte_port *port;
521 struct rte_mempool *mbp;
522 unsigned int nb_mbuf_per_pool;
524 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
526 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
527 /* Configuration of logical cores. */
528 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
529 sizeof(struct fwd_lcore *) * nb_lcores,
530 RTE_CACHE_LINE_SIZE);
531 if (fwd_lcores == NULL) {
532 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
533 "failed\n", nb_lcores);
535 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
536 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
537 sizeof(struct fwd_lcore),
538 RTE_CACHE_LINE_SIZE);
539 if (fwd_lcores[lc_id] == NULL) {
540 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
543 fwd_lcores[lc_id]->cpuid_idx = lc_id;
547 * Create pools of mbuf.
548 * If NUMA support is disabled, create a single pool of mbuf in
549 * socket 0 memory by default.
550 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
552 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
553 * nb_txd can be configured at run time.
555 if (param_total_num_mbufs)
556 nb_mbuf_per_pool = param_total_num_mbufs;
/* Default sizing: worst-case descriptors + per-lcore cache + one burst. */
558 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
559 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
563 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
/* UMA path: one pool, either on socket 0 or on the --socket-num socket. */
567 if (socket_num == UMA_NO_CONFIG)
568 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
570 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
/* Per-port: fetch device info, count ports per socket, mark for reconfig. */
574 RTE_ETH_FOREACH_DEV(pid) {
576 rte_eth_dev_info_get(pid, &port->dev_info);
579 if (port_numa[pid] != NUMA_NO_CONFIG)
580 port_per_socket[port_numa[pid]]++;
582 uint32_t socket_id = rte_eth_dev_socket_id(pid);
584 /* if socket_id is invalid, set to 0 */
585 if (check_socket_id(socket_id) < 0)
587 port_per_socket[socket_id]++;
591 /* set flag to initialize port/queue */
592 port->need_reconfig = 1;
593 port->need_reconfig_queues = 1;
/* NUMA path: split the mbuf budget across ports/sockets. */
598 unsigned int nb_mbuf;
600 if (param_total_num_mbufs && nb_ports != 0)
601 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
603 for (i = 0; i < max_socket; i++) {
604 nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
606 mbuf_pool_create(mbuf_data_size,
613 * Records which Mbuf pool to use by each logical core, if needed.
615 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
616 mbp = mbuf_pool_find(
617 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
/* Fall back to the socket-0 pool when the lcore's socket has none. */
620 mbp = mbuf_pool_find(0);
621 fwd_lcores[lc_id]->mbp = mbp;
624 /* Configuration of packet forwarding streams. */
625 if (init_fwd_streams() < 0)
626 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * Re-read device info for one port (e.g. after hotplug/attach) and mark
 * it for full port/queue reconfiguration on the given socket.
 */
633 reconfig(portid_t new_port_id, unsigned socket_id)
635 struct rte_port *port;
637 /* Reconfiguration of Ethernet ports. */
638 port = &ports[new_port_id];
639 rte_eth_dev_info_get(new_port_id, &port->dev_info);
641 /* set flag to initialize port/queue */
642 port->need_reconfig = 1;
643 port->need_reconfig_queues = 1;
644 port->socket_id = socket_id;
/*
 * (Re)allocate the fwd_streams array sized nb_ports * max(nb_rxq, nb_txq).
 * Validates nb_rxq/nb_txq against each device's limits and assigns each
 * port a socket id (NUMA-aware, or --socket-num / 0 in UMA mode).
 * Frees any previous stream array before reallocating. Returns negative
 * on validation failure (exact return statements not shown in this
 * sampled listing).
 */
651 init_fwd_streams(void)
654 struct rte_port *port;
655 streamid_t sm_id, nb_fwd_streams_new;
658 /* set socket id according to numa or not */
659 RTE_ETH_FOREACH_DEV(pid) {
661 if (nb_rxq > port->dev_info.max_rx_queues) {
662 printf("Fail: nb_rxq(%d) is greater than "
663 "max_rx_queues(%d)\n", nb_rxq,
664 port->dev_info.max_rx_queues);
667 if (nb_txq > port->dev_info.max_tx_queues) {
668 printf("Fail: nb_txq(%d) is greater than "
669 "max_tx_queues(%d)\n", nb_txq,
670 port->dev_info.max_tx_queues);
674 if (port_numa[pid] != NUMA_NO_CONFIG)
675 port->socket_id = port_numa[pid];
677 port->socket_id = rte_eth_dev_socket_id(pid);
679 /* if socket_id is invalid, set to 0 */
680 if (check_socket_id(port->socket_id) < 0)
685 if (socket_num == UMA_NO_CONFIG)
688 port->socket_id = socket_num;
692 q = RTE_MAX(nb_rxq, nb_txq);
694 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
697 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
/* Nothing to do if the stream count is unchanged. */
698 if (nb_fwd_streams_new == nb_fwd_streams)
701 if (fwd_streams != NULL) {
702 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
703 if (fwd_streams[sm_id] == NULL)
705 rte_free(fwd_streams[sm_id]);
706 fwd_streams[sm_id] = NULL;
708 rte_free(fwd_streams);
713 nb_fwd_streams = nb_fwd_streams_new;
714 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
715 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
716 if (fwd_streams == NULL)
717 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
718 "failed\n", nb_fwd_streams);
720 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
721 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
722 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
723 if (fwd_streams[sm_id] == NULL)
724 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
731 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a one-line summary of the burst-size distribution in *pbs:
 * total bursts, plus the top two most frequent burst sizes as percentages
 * (an "others" bucket covers the remainder).
 * NOTE(review): the branch updating burst_stats[1]/[2] for the second-
 * highest count is not visible in this sampled listing.
 */
733 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
735 unsigned int total_burst;
736 unsigned int nb_burst;
737 unsigned int burst_stats[3];
738 uint16_t pktnb_stats[3];
740 int burst_percent[3];
743 * First compute the total number of packet bursts and the
744 * two highest numbers of bursts of the same number of packets.
747 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
748 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
749 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
750 nb_burst = pbs->pkt_burst_spread[nb_pkt];
753 total_burst += nb_burst;
754 if (nb_burst > burst_stats[0]) {
755 burst_stats[1] = burst_stats[0];
756 pktnb_stats[1] = pktnb_stats[0];
757 burst_stats[0] = nb_burst;
758 pktnb_stats[0] = nb_pkt;
761 if (total_burst == 0)
763 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
764 printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
765 burst_percent[0], (int) pktnb_stats[0]);
/* All bursts had the same size: nothing more to break down. */
766 if (burst_stats[0] == total_burst) {
770 if (burst_stats[0] + burst_stats[1] == total_burst) {
771 printf(" + %d%% of %d pkts]\n",
772 100 - burst_percent[0], pktnb_stats[1]);
775 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
776 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
777 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
778 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
781 printf(" + %d%% of %d pkts + %d%% of others]\n",
782 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
784 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print the forward statistics of one port: RX/TX packet counts, drops,
 * csum errors (csum engine only), RX errors/nombufs, and — when queue
 * stats mappings are enabled — the per-stats-register counters.
 * Two formatting variants: left-aligned when no queue-stats mapping is
 * enabled, right-aligned otherwise.
 */
787 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
789 struct rte_port *port;
792 static const char *fwd_stats_border = "----------------------";
794 port = &ports[port_id];
795 printf("\n  %s Forward statistics for port %-2d %s\n",
796 fwd_stats_border, port_id, fwd_stats_border);
798 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
799 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
801 stats->ipackets, stats->imissed,
802 (uint64_t) (stats->ipackets + stats->imissed));
804 if (cur_fwd_eng == &csum_fwd_engine)
805 printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
806 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
807 if ((stats->ierrors + stats->rx_nombuf) > 0) {
808 printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
809 printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
812 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
814 stats->opackets, port->tx_dropped,
815 (uint64_t) (stats->opackets + port->tx_dropped));
818 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
820 stats->ipackets, stats->imissed,
821 (uint64_t) (stats->ipackets + stats->imissed));
823 if (cur_fwd_eng == &csum_fwd_engine)
824 printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
825 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
826 if ((stats->ierrors + stats->rx_nombuf) > 0) {
827 printf("  RX-error:%"PRIu64"\n", stats->ierrors);
828 printf("  RX-nombufs:             %14"PRIu64"\n",
832 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
834 stats->opackets, port->tx_dropped,
835 (uint64_t) (stats->opackets + port->tx_dropped));
838 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
840 pkt_burst_stats_display("RX",
841 &port->rx_stream->rx_burst_stats);
843 pkt_burst_stats_display("TX",
844 &port->tx_stream->tx_burst_stats);
847 if (port->rx_queue_stats_mapping_enabled) {
849 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
850 printf("  Stats reg %2d RX-packets:%14"PRIu64
851 "     RX-errors:%14"PRIu64
852 "    RX-bytes:%14"PRIu64"\n",
853 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
857 if (port->tx_queue_stats_mapping_enabled) {
858 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
859 printf("  Stats reg %2d TX-packets:%14"PRIu64
860 "                                 TX-bytes:%14"PRIu64"\n",
861 i, stats->q_opackets[i], stats->q_obytes[i]);
865 printf("  %s--------------------------------%s\n",
866 fwd_stats_border, fwd_stats_border);
/*
 * Print the statistics of one forwarding stream (RX port/queue ->
 * TX port/queue): packet counts, drops, and — for the csum engine —
 * bad checksum counters. Returns early (lines elided in this listing)
 * when the stream saw no traffic at all.
 */
870 fwd_stream_stats_display(streamid_t stream_id)
872 struct fwd_stream *fs;
873 static const char *fwd_top_stats_border = "-------";
875 fs = fwd_streams[stream_id];
876 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
877 (fs->fwd_dropped == 0))
879 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
880 "TX Port=%2d/Queue=%2d %s\n",
881 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
882 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
883 printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
884 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
886 /* if checksum mode */
887 if (cur_fwd_eng == &csum_fwd_engine) {
888 printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
889 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
892 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
893 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
894 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain stale packets from every RX queue of every forwarding port before
 * a new forwarding run. Two passes with a 10 ms pause between them; each
 * queue is polled until rte_eth_rx_burst() returns 0 or a 1-second TSC
 * timeout expires (guards against a queue that never runs dry).
 */
899 flush_fwd_rx_queues(void)
901 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
908 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
909 uint64_t timer_period;
911 /* convert to number of cycles */
912 timer_period = rte_get_timer_hz(); /* 1 second timeout */
914 for (j = 0; j < 2; j++) {
915 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
916 for (rxq = 0; rxq < nb_rxq; rxq++) {
917 port_id = fwd_ports_ids[rxp];
919 * testpmd can stuck in the below do while loop
920 * if rte_eth_rx_burst() always returns nonzero
921 * packets. So timer is added to exit this loop
922 * after 1sec timer expiry.
924 prev_tsc = rte_rdtsc();
926 nb_rx = rte_eth_rx_burst(port_id, rxq,
927 pkts_burst, MAX_PKT_BURST);
928 for (i = 0; i < nb_rx; i++)
929 rte_pktmbuf_free(pkts_burst[i]);
931 cur_tsc = rte_rdtsc();
932 diff_tsc = cur_tsc - prev_tsc;
933 timer_tsc += diff_tsc;
934 } while ((nb_rx > 0) &&
935 (timer_tsc < timer_period));
939 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main per-lcore forwarding loop: repeatedly run pkt_fwd on each stream
 * assigned to this lcore until fc->stopped is set. Optionally recomputes
 * per-port bitrates once per second (RTE_LIBRTE_BITRATE) and updates
 * latency stats on the designated lcore (RTE_LIBRTE_LATENCY_STATS).
 */
944 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
946 struct fwd_stream **fsm;
949 #ifdef RTE_LIBRTE_BITRATE
950 uint64_t tics_per_1sec;
952 uint64_t tics_current;
953 uint8_t idx_port, cnt_ports;
955 cnt_ports = rte_eth_dev_count();
956 tics_datum = rte_rdtsc();
957 tics_per_1sec = rte_get_timer_hz();
959 fsm = &fwd_streams[fc->stream_idx];
960 nb_fs = fc->stream_nb;
962 for (sm_id = 0; sm_id < nb_fs; sm_id++)
963 (*pkt_fwd)(fsm[sm_id]);
964 #ifdef RTE_LIBRTE_BITRATE
965 tics_current = rte_rdtsc();
966 if (tics_current - tics_datum >= tics_per_1sec) {
967 /* Periodic bitrate calculation */
968 for (idx_port = 0; idx_port < cnt_ports; idx_port++)
969 rte_stats_bitrate_calc(bitrate_data, idx_port);
970 tics_datum = tics_current;
973 #ifdef RTE_LIBRTE_LATENCY_STATS
974 if (latencystats_lcore_id == rte_lcore_id())
975 rte_latencystats_update();
978 } while (! fc->stopped);
/*
 * lcore entry point (rte_eal_remote_launch): run the currently configured
 * forwarding engine's packet_fwd loop on this lcore's streams.
 */
982 start_pkt_forward_on_core(void *fwd_arg)
984 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
985 cur_fwd_config.fwd_eng->packet_fwd);
990 * Run the TXONLY packet forwarding engine to send a single burst of packets.
991 * Used to start communication flows in network loopback test configurations.
/*
 * Uses a stack copy of the fwd_lcore with stopped=1 so the forwarding loop
 * executes exactly one iteration, without mutating the shared lcore state.
 */
994 run_one_txonly_burst_on_core(void *fwd_arg)
996 struct fwd_lcore *fwd_lc;
997 struct fwd_lcore tmp_lcore;
999 fwd_lc = (struct fwd_lcore *) fwd_arg;
1000 tmp_lcore = *fwd_lc;
1001 tmp_lcore.stopped = 1;
1002 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1007 * Launch packet forwarding:
1008 *   - Setup per-port forwarding context.
1009 *   - launch logical cores with their forwarding configuration.
/*
 * Calls the engine's per-port begin hook (if any), then launches
 * pkt_fwd_on_lcore on every configured forwarding lcore via
 * rte_eal_remote_launch(). In interactive mode the current (master)
 * lcore is skipped so the CLI stays responsive.
 */
1012 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1014 port_fwd_begin_t port_fwd_begin;
1019 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1020 if (port_fwd_begin != NULL) {
1021 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1022 (*port_fwd_begin)(fwd_ports_ids[i]);
1024 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1025 lc_id = fwd_lcores_cpuids[i];
1026 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1027 fwd_lcores[i]->stopped = 0;
1028 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1029 fwd_lcores[i], lc_id);
1031 printf("launch lcore %u failed - diag=%d\n",
1038 * Launch packet forwarding configuration.
/*
 * Start a forwarding run:
 *   - validate queue counts against the selected engine (rxonly/txonly/
 *     other) and check all ports are started and no run is in progress;
 *   - in DCB mode, require all ports DCB-configured and >1 fwd lcore;
 *   - flush stale RX packets (unless no_flush_rx), display configuration,
 *     snapshot per-port stats and zero per-stream counters;
 *   - if with_tx_first, run the txonly engine for that many single bursts
 *     first, then launch the configured engine on all lcores.
 */
1041 start_packet_forwarding(int with_tx_first)
1043 port_fwd_begin_t port_fwd_begin;
1044 port_fwd_end_t port_fwd_end;
1045 struct rte_port *port;
1050 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1051 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1053 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1054 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1056 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1057 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1058 (!nb_rxq || !nb_txq))
1059 rte_exit(EXIT_FAILURE,
1060 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1061 cur_fwd_eng->fwd_mode_name);
1063 if (all_ports_started() == 0) {
1064 printf("Not all ports were started\n");
1067 if (test_done == 0) {
1068 printf("Packet forwarding already started\n");
1072 if (init_fwd_streams() < 0) {
1073 printf("Fail from init_fwd_streams()\n");
/* DCB sanity checks (the enclosing dcb_config condition at original line
 * 1077 is not shown in this sampled listing). */
1078 for (i = 0; i < nb_fwd_ports; i++) {
1079 pt_id = fwd_ports_ids[i];
1080 port = &ports[pt_id];
1081 if (!port->dcb_flag) {
1082 printf("In DCB mode, all forwarding ports must "
1083 "be configured in this mode.\n");
1087 if (nb_fwd_lcores == 1) {
1088 printf("In DCB mode,the nb forwarding cores "
1089 "should be larger than 1.\n");
1096 flush_fwd_rx_queues();
1099 pkt_fwd_config_display(&cur_fwd_config);
1100 rxtx_config_display();
/* Snapshot current HW stats per port so the run's delta can be reported. */
1102 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1103 pt_id = fwd_ports_ids[i];
1104 port = &ports[pt_id];
1105 rte_eth_stats_get(pt_id, &port->stats);
1106 port->tx_dropped = 0;
1108 map_port_queue_stats_mapping_registers(pt_id, port);
1110 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1111 fwd_streams[sm_id]->rx_packets = 0;
1112 fwd_streams[sm_id]->tx_packets = 0;
1113 fwd_streams[sm_id]->fwd_dropped = 0;
1114 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1115 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1117 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1118 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1119 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1120 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1121 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1123 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1124 fwd_streams[sm_id]->core_cycles = 0;
1127 if (with_tx_first) {
1128 port_fwd_begin = tx_only_engine.port_fwd_begin;
1129 if (port_fwd_begin != NULL) {
1130 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1131 (*port_fwd_begin)(fwd_ports_ids[i]);
1133 while (with_tx_first--) {
1134 launch_packet_forwarding(
1135 run_one_txonly_burst_on_core);
1136 rte_eal_mp_wait_lcore();
1138 port_fwd_end = tx_only_engine.port_fwd_end;
1139 if (port_fwd_end != NULL) {
1140 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1141 (*port_fwd_end)(fwd_ports_ids[i]);
1144 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop the current forwarding run and report statistics:
 *   - set every forwarding lcore's stopped flag and wait for all lcores;
 *   - call the engine's per-port end hook;
 *   - aggregate per-stream counters (drops, bad ip/l4 csum, cycles) into
 *     the owning ports, displaying per-stream stats when there is more
 *     than one stream per port;
 *   - read HW stats per port, subtract the snapshot taken at start, and
 *     print per-port plus accumulated totals (and CPU cycles/packet when
 *     RTE_TEST_PMD_RECORD_CORE_CYCLES is enabled).
 */
1148 stop_packet_forwarding(void)
1150 struct rte_eth_stats stats;
1151 struct rte_port *port;
1152 port_fwd_end_t  port_fwd_end;
1157 uint64_t total_recv;
1158 uint64_t total_xmit;
1159 uint64_t total_rx_dropped;
1160 uint64_t total_tx_dropped;
1161 uint64_t total_rx_nombuf;
1162 uint64_t tx_dropped;
1163 uint64_t rx_bad_ip_csum;
1164 uint64_t rx_bad_l4_csum;
1165 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1166 uint64_t fwd_cycles;
1168 static const char *acc_stats_border = "+++++++++++++++";
/* Guard: nothing to stop (condition line not shown in this listing). */
1171 printf("Packet forwarding not started\n");
1174 printf("Telling cores to stop...");
1175 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1176 fwd_lcores[lc_id]->stopped = 1;
1177 printf("\nWaiting for lcores to finish...\n");
1178 rte_eal_mp_wait_lcore();
1179 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1180 if (port_fwd_end != NULL) {
1181 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1182 pt_id = fwd_ports_ids[i];
1183 (*port_fwd_end)(pt_id);
1186 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1189 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1190 if (cur_fwd_config.nb_fwd_streams >
1191 cur_fwd_config.nb_fwd_ports) {
1192 fwd_stream_stats_display(sm_id);
1193 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1194 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1196 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1198 ports[fwd_streams[sm_id]->rx_port].rx_stream =
/* Accumulate stream drops into the TX port's running total. */
1201 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1202 tx_dropped = (uint64_t) (tx_dropped +
1203 fwd_streams[sm_id]->fwd_dropped);
1204 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1207 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1208 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1209 fwd_streams[sm_id]->rx_bad_ip_csum);
1210 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1214 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1215 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1216 fwd_streams[sm_id]->rx_bad_l4_csum);
1217 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1220 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1221 fwd_cycles = (uint64_t) (fwd_cycles +
1222 fwd_streams[sm_id]->core_cycles);
1227 total_rx_dropped = 0;
1228 total_tx_dropped = 0;
1229 total_rx_nombuf  = 0;
/* Per-port: read HW stats, subtract the start-of-run snapshot in
 * port->stats to obtain this run's delta, and reset the snapshot. */
1230 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1231 pt_id = fwd_ports_ids[i];
1233 port = &ports[pt_id];
1234 rte_eth_stats_get(pt_id, &stats);
1235 stats.ipackets -= port->stats.ipackets;
1236 port->stats.ipackets = 0;
1237 stats.opackets -= port->stats.opackets;
1238 port->stats.opackets = 0;
1239 stats.ibytes   -= port->stats.ibytes;
1240 port->stats.ibytes = 0;
1241 stats.obytes   -= port->stats.obytes;
1242 port->stats.obytes = 0;
1243 stats.imissed  -= port->stats.imissed;
1244 port->stats.imissed = 0;
1245 stats.oerrors  -= port->stats.oerrors;
1246 port->stats.oerrors = 0;
1247 stats.rx_nombuf -= port->stats.rx_nombuf;
1248 port->stats.rx_nombuf = 0;
1250 total_recv += stats.ipackets;
1251 total_xmit += stats.opackets;
1252 total_rx_dropped += stats.imissed;
1253 total_tx_dropped += port->tx_dropped;
1254 total_rx_nombuf  += stats.rx_nombuf;
1256 fwd_port_stats_display(pt_id, &stats);
1258 printf("\n  %s Accumulated forward statistics for all ports"
1260 acc_stats_border, acc_stats_border);
1261 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1263 "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1265 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1266 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1267 if (total_rx_nombuf > 0)
1268 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1269 printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1271 acc_stats_border, acc_stats_border);
1272 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1274 printf("\n  CPU cycles/packet=%u (total cycles="
1275 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1276 (unsigned int)(fwd_cycles / total_recv),
1277 fwd_cycles, total_recv);
1279 printf("\nDone.\n");
1284 dev_set_link_up(portid_t pid)
1286 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1287 printf("\nSet link up fail.\n");
1291 dev_set_link_down(portid_t pid)
1293 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1294 printf("\nSet link down fail.\n");
1298 all_ports_started(void)
1301 struct rte_port *port;
1303 RTE_ETH_FOREACH_DEV(pi) {
1305 /* Check if there is a port which is not started */
1306 if ((port->port_status != RTE_PORT_STARTED) &&
1307 (port->slave_flag == 0))
1311 /* No port is not started */
1316 all_ports_stopped(void)
1319 struct rte_port *port;
1321 RTE_ETH_FOREACH_DEV(pi) {
1323 if ((port->port_status != RTE_PORT_STOPPED) &&
1324 (port->slave_flag == 0))
1332 port_is_started(portid_t port_id)
1334 if (port_id_is_invalid(port_id, ENABLED_WARN))
1337 if (ports[port_id].port_status != RTE_PORT_STARTED)
1344 port_is_closed(portid_t port_id)
1346 if (port_id_is_invalid(port_id, ENABLED_WARN))
1349 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * Start port @pid, or every port when @pid == RTE_PORT_ALL.
 * Per port: STOPPED -> HANDLING (CAS-guarded), optional reconfigure of the
 * device and of its RX/TX queues, event-callback registration, device start,
 * then HANDLING -> STARTED.  Finally triggers a link-status check.
 * NOTE(review): this chunk appears line-sampled — braces, `continue`/`return`
 * statements and some argument lines are missing; comments below describe the
 * visible logic only.
 */
start_port(portid_t pid)
	int diag, need_check_link_status = -1;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	/* Reject bad port ids up front (warning already printed). */
	if (port_id_is_invalid(pid, ENABLED_WARN))
	RTE_ETH_FOREACH_DEV(pi) {
		/* Only act on the requested port (or all ports). */
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
		need_check_link_status = 0;
		/* Claim the port: must transition STOPPED -> HANDLING atomically. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
		/* Re-run rte_eth_dev_configure() when flagged by a config change. */
		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;
			printf("Configuring Port %d (socket %u)\n", pi,
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
				/* On failure: roll state back to STOPPED and retry later. */
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
		/* Re-create RX/TX queues when flagged by a queue-config change. */
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* NUMA mode: place the TX ring on its configured socket. */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,txring_numa[pi],
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,port->socket_id,
				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
								RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				/* NUMA mode: mempool must exist on the RX ring's socket. */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd,rxring_numa[pi],
						&(port->rx_conf),mp);
					/* Non-NUMA path: use the port's own socket mempool. */
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd,port->socket_id,
						&(port->rx_conf), mp);
				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
								RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
		/* Register a callback for every ethdev event type. */
		for (event_type = RTE_ETH_EVENT_UNKNOWN;
		     event_type < RTE_ETH_EVENT_MAX;
			diag = rte_eth_dev_callback_register(pi,
				printf("Failed to setup even callback for event %d\n",
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);
			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
		/* Commit: HANDLING -> STARTED. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);
		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		/* need_check_link_status stayed 0: matched port was not STOPPED. */
		printf("Please stop the ports first\n");
/*
 * Stop port @pid, or every port when @pid == RTE_PORT_ALL.
 * Refuses ports still referenced by the forwarding config (unless forwarding
 * is done) and bonding slaves.  State machine: STARTED -> HANDLING -> STOPPED.
 * NOTE(review): chunk appears line-sampled; some braces/`continue`s missing.
 */
stop_port(portid_t pid)
	struct rte_port *port;
	int need_check_link_status = 0;

	if (port_id_is_invalid(pid, ENABLED_WARN))
	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
		/* A port still in the forwarding config must be removed first. */
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
		/* Bonding slaves are stopped via their bonded master device. */
		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
		/* Claim the port: must currently be STARTED. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
		rte_eth_dev_stop(pi);
		/* Commit: HANDLING -> STOPPED. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Close port @pid, or every port when @pid == RTE_PORT_ALL.
 * Skips ports in the forwarding config, bonding slaves, already-closed and
 * non-stopped ports.  Flushes flow rules before rte_eth_dev_close().
 * State machine: STOPPED -> HANDLING -> CLOSED.
 * NOTE(review): chunk appears line-sampled; some braces/`continue`s missing.
 */
close_port(portid_t pid)
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
		/* CAS with identical old/new values is a pure status probe. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
		/* Claim the port: must currently be STOPPED. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
		/* Drop installed rte_flow rules before closing the device. */
		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);
		/* Commit: HANDLING -> CLOSED. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
/*
 * Hot-plug a new ethdev described by @identifier (PCI address or virtual
 * device args), reconfigure it on its NUMA socket, enable promiscuous mode
 * and mark it STOPPED so it can be started with the usual commands.
 * NOTE(review): chunk appears line-sampled; early-return lines are missing.
 */
attach_port(char *identifier)
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
	/* rte_eth_dev_attach() fills @pi with the new port id on success. */
	if (rte_eth_dev_attach(identifier, &pi))
	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Hot-unplug port @port_id.  The port must already be closed; its flow
 * rules are flushed first, then rte_eth_dev_detach() removes the device
 * and the global port count is refreshed.
 * NOTE(review): chunk appears line-sampled; early-return lines are missing.
 */
detach_port(uint8_t port_id)
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	/* On success @name receives the detached device's name. */
	if (rte_eth_dev_detach(port_id, name))
	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
	/*
	 * NOTE(review): the enclosing function's declarator was dropped by the
	 * extraction — presumably this is the testpmd exit path (pmd_test_exit);
	 * verify against the full file.  It stops forwarding, then shuts down
	 * every known port before printing the farewell banner.
	 */
	stop_packet_forwarding();

	if (ports != NULL) {
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
	printf("\nBye...\n");
/* Handler type for an interactive PMD test menu entry. */
typedef void (*cmd_func_t)(void);

/* One menu entry: the command string and the function it dispatches to. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;

/* Number of entries in the pmd_test_menu table (defined elsewhere). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1709 /* Check the link status of all ports in up to 9s, and print them finally */
1711 check_all_ports_link_status(uint32_t port_mask)
1713 #define CHECK_INTERVAL 100 /* 100ms */
1714 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1715 uint8_t portid, count, all_ports_up, print_flag = 0;
1716 struct rte_eth_link link;
1718 printf("Checking link statuses...\n");
1720 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1722 RTE_ETH_FOREACH_DEV(portid) {
1723 if ((port_mask & (1 << portid)) == 0)
1725 memset(&link, 0, sizeof(link));
1726 rte_eth_link_get_nowait(portid, &link);
1727 /* print link status if flag set */
1728 if (print_flag == 1) {
1729 if (link.link_status)
1730 printf("Port %d Link Up - speed %u "
1731 "Mbps - %s\n", (uint8_t)portid,
1732 (unsigned)link.link_speed,
1733 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1734 ("full-duplex") : ("half-duplex\n"));
1736 printf("Port %d Link Down\n",
1740 /* clear all_ports_up flag if any link down */
1741 if (link.link_status == ETH_LINK_DOWN) {
1746 /* after finally printing all link status, get out */
1747 if (print_flag == 1)
1750 if (all_ports_up == 0) {
1752 rte_delay_ms(CHECK_INTERVAL);
1755 /* set the print_flag if all ports up or timeout */
1756 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred handler for an RTE_ETH_EVENT_INTR_RMV (device removal) event,
 * scheduled from eth_event_callback() via rte_eal_alarm_set().  Closes the
 * port, resolves the underlying device name from its devargs (virtual or
 * whitelisted PCI) and detaches it from the EAL.
 * NOTE(review): chunk appears line-sampled — the declaration of `name` is
 * not visible here; confirm its size against the full file.
 */
rmv_event_callback(void *arg)
	struct rte_eth_dev *dev;
	struct rte_devargs *da;
	/* Port id was smuggled through the void* alarm argument. */
	uint8_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	da = dev->device->devargs;

	close_port(port_id);
	if (da->type == RTE_DEVTYPE_VIRTUAL)
		snprintf(name, sizeof(name), "%s", da->virt.drv_name);
	else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
		rte_eal_pci_device_name(&da->pci.addr, name, sizeof(name));
	printf("removing device %s\n", name);
	rte_eal_dev_detach(name);
	/* Free the ethdev slot for reuse. */
	dev->state = RTE_ETH_DEV_UNUSED;
/* This function is used by the interrupt thread */
/*
 * Generic ethdev event callback: logs the event by name, and for device
 * removal schedules rmv_event_callback() 100ms later via an EAL alarm
 * (removal must not be handled inside the interrupt context itself).
 * NOTE(review): chunk appears line-sampled — the switch statement and
 * return paths are partially missing.
 */
eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
	/* Human-readable names indexed by event type, for logging. */
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_MAX] = NULL,

	RTE_SET_USED(param);

	/* Out-of-range event types would index past event_desc. */
	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
	printf("\nPort %" PRIu8 ": %s event\n", port_id,
	case RTE_ETH_EVENT_INTR_RMV:
		/* Defer removal: 100000us alarm, port id packed into the arg. */
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
/*
 * Program the TX queue -> stats-counter mappings configured on the command
 * line for @port_id, and record on @port that TX mapping is enabled when at
 * least one mapping applied.
 * NOTE(review): chunk appears line-sampled — error/return handling for
 * `diag` and the `mapping_found` bookkeeping are partially missing.
 */
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		/* Apply only entries for this port with an in-range queue id. */
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
		port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX counterpart of set_tx_queue_stats_mapping_registers(): program the
 * RX queue -> stats-counter mappings for @port_id and flag RX mapping as
 * enabled on @port when at least one mapping applied.
 * NOTE(review): chunk appears line-sampled — error/return handling for
 * `diag` and the `mapping_found` bookkeeping are partially missing.
 */
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		/* Apply only entries for this port with an in-range queue id. */
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
		port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue-stats mappings to port @pi.
 * -ENOTSUP from the driver is tolerated (the feature is simply disabled on
 * @port); any other failure aborts testpmd via rte_exit().
 */
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag == -ENOTSUP) {
		/* Driver has no TX mapping support: disable, do not abort. */
		port->tx_queue_stats_mapping_enabled = 0;
		printf("TX queue stats mapping not supported port id=%d\n", pi);
		rte_exit(EXIT_FAILURE,
				"set_tx_queue_stats_mapping_registers "
				"failed for port id=%d diag=%d\n",
	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag == -ENOTSUP) {
		/* Driver has no RX mapping support: disable, do not abort. */
		port->rx_queue_stats_mapping_enabled = 0;
		printf("RX queue stats mapping not supported port id=%d\n", pi);
		rte_exit(EXIT_FAILURE,
				"set_rx_queue_stats_mapping_registers "
				"failed for port id=%d diag=%d\n",
1905 rxtx_port_config(struct rte_port *port)
1907 port->rx_conf = port->dev_info.default_rxconf;
1908 port->tx_conf = port->dev_info.default_txconf;
1910 /* Check if any RX/TX parameters have been passed */
1911 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1912 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1914 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1915 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1917 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1918 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1920 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1921 port->rx_conf.rx_free_thresh = rx_free_thresh;
1923 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1924 port->rx_conf.rx_drop_en = rx_drop_en;
1926 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1927 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1929 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1930 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1932 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1933 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1935 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1936 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1938 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1939 port->tx_conf.tx_free_thresh = tx_free_thresh;
1941 if (txq_flags != RTE_PMD_PARAM_UNSET)
1942 port->tx_conf.txq_flags = txq_flags;
/*
 * Build the default device configuration for every probed port: RX mode,
 * flow-director config, RSS (keyed by the global rss_hf), queue thresholds,
 * MAC address caching, stats-mapping registers and (when supported by the
 * device) LSC/RMV interrupt enablement.
 * NOTE(review): chunk appears line-sampled — the branch that selects
 * between the two rss_conf assignment pairs (presumably on nb_rxq) and
 * the `port = &ports[pid];` line are not visible; verify against the
 * full file.
 */
init_port_config(void)
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		/* Default RSS key, hash functions from the global rss_hf. */
		port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
		port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		/* Alternative branch: RSS disabled (hash functions zeroed). */
		port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
		port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;

		/* Outside DCB mode, pick RSS vs. no multi-queue from rss_hf. */
		if (port->dcb_flag == 0) {
			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);

		/* Enable link-state-change interrupts when device supports them. */
		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		/* Enable device-removal interrupts when device supports them. */
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
1990 void set_port_slave_flag(portid_t slave_pid)
1992 struct rte_port *port;
1994 port = &ports[slave_pid];
1995 port->slave_flag = 1;
1998 void clear_port_slave_flag(portid_t slave_pid)
2000 struct rte_port *port;
2002 port = &ports[slave_pid];
2003 port->slave_flag = 0;
2006 uint8_t port_is_bonding_slave(portid_t slave_pid)
2008 struct rte_port *port;
2010 port = &ports[slave_pid];
2011 return port->slave_flag;
/* VLAN ids 0..31 used to populate the VMDQ+DCB pool map in get_eth_dcb_conf(). */
const uint16_t vlan_tags[] = {
	0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill @eth_conf for DCB operation.
 * DCB_VT_ENABLED: VMDQ+DCB on both RX and TX — pool count derived from
 * @num_tcs, vlan_tags[] mapped round-robin onto the pools, identity TC map.
 * Otherwise: plain DCB with RSS on RX (@num_tcs traffic classes, priorities
 * mapped i % num_tcs).  Finally sets dcb_capability_en (PFC support appears
 * conditional — the controlling `if` line is not visible in this chunk).
 * NOTE(review): chunk appears line-sampled — the pfc_en parameter line,
 * braces and some closing statements are missing.
 */
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs -> 32 pools, otherwise 16 pools. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		/* Spread the vlan tags round-robin across the pools. */
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		/* Identity mapping: user priority i -> traffic class i. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Fold the 8 user priorities onto the available TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;

		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * Switch port @pid into DCB mode: build the DCB rte_eth_conf via
 * get_eth_dcb_conf(), apply it with a zero-queue rte_eth_dev_configure()
 * (queues are set up later by start_port()), derive the global nb_rxq/nb_txq
 * from the device capabilities, install the VLAN filter table and mark the
 * port's dcb_flag.
 * NOTE(review): chunk appears line-sampled — error-return checks on
 * `retval`, braces and a few statements are not visible.
 */
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */

	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	port_conf.rxmode.hw_vlan_filter = 1;

	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);

	/* Assume the ports in testpmd have the same dcb capability
	 * and has the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			/* VFs present: PF keeps only its pre-assigned queues. */
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		/*if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
			/* One queue per traffic class otherwise. */
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;

	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;
	/* Configuration of Ethernet ports. */
	/*
	 * NOTE(review): the enclosing function's declarator was dropped by the
	 * extraction — this allocates the global zero-initialized `ports` array
	 * (one rte_port per possible ethdev), aborting testpmd on OOM.
	 */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
/*
 * SIGINT/SIGTERM handler: tear down optional subsystems (packet capture,
 * latency stats), then re-raise the signal with the default disposition so
 * the process exits with the conventional signal status.
 * NOTE(review): chunk appears line-sampled — the rte_pdump uninit call
 * under RTE_LIBRTE_PDUMP and the #endif lines are not visible.
 */
signal_handler(int signum)
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
		/* exit with the expected status */
		/* Restore default handling and re-raise so the exit status
		 * reflects death-by-signal. */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
2212 main(int argc, char** argv)
2217 signal(SIGINT, signal_handler);
2218 signal(SIGTERM, signal_handler);
2220 diag = rte_eal_init(argc, argv);
2222 rte_panic("Cannot init EAL\n");
2224 #ifdef RTE_LIBRTE_PDUMP
2225 /* initialize packet capture framework */
2226 rte_pdump_init(NULL);
2229 nb_ports = (portid_t) rte_eth_dev_count();
2231 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2233 /* allocate port structures, and init them */
2236 set_def_fwd_config();
2238 rte_panic("Empty set of forwarding logical cores - check the "
2239 "core mask supplied in the command parameters\n");
2244 launch_args_parse(argc, argv);
2246 if (!nb_rxq && !nb_txq)
2247 printf("Warning: Either rx or tx queues should be non-zero\n");
2249 if (nb_rxq > 1 && nb_rxq > nb_txq)
2250 printf("Warning: nb_rxq=%d enables RSS configuration, "
2251 "but nb_txq=%d will prevent to fully test it.\n",
2255 if (start_port(RTE_PORT_ALL) != 0)
2256 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2258 /* set all ports to promiscuous mode by default */
2259 RTE_ETH_FOREACH_DEV(port_id)
2260 rte_eth_promiscuous_enable(port_id);
2262 /* Init metrics library */
2263 rte_metrics_init(rte_socket_id());
2265 #ifdef RTE_LIBRTE_LATENCY_STATS
2266 if (latencystats_enabled != 0) {
2267 int ret = rte_latencystats_init(1, NULL);
2269 printf("Warning: latencystats init()"
2270 " returned error %d\n", ret);
2271 printf("Latencystats running on lcore %d\n",
2272 latencystats_lcore_id);
2276 /* Setup bitrate stats */
2277 #ifdef RTE_LIBRTE_BITRATE
2278 bitrate_data = rte_stats_bitrate_create();
2279 if (bitrate_data == NULL)
2280 rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
2281 rte_stats_bitrate_reg(bitrate_data);
2285 #ifdef RTE_LIBRTE_CMDLINE
2286 if (interactive == 1) {
2288 printf("Start automatic packet forwarding\n");
2289 start_packet_forwarding(0);
2298 printf("No commandline core given, start packet forwarding\n");
2299 start_packet_forwarding(0);
2300 printf("Press enter to exit\n");
2301 rc = read(0, &c, 1);