4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
/*
 * Global testpmd runtime state: command-line-controlled knobs, probed
 * port/lcore inventory, forwarding configuration, default Ethernet RX
 * mode and flow-director configuration, and queue-stats mappings.
 * NOTE(review): this chunk appears truncated -- several comment openers,
 * closing braces of initializers and #endif lines are not visible here.
 */
93 uint16_t verbose_level = 0; /**< Silent by default. */
95 /* use master core for command line ? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
100 * NUMA support configuration.
101 * When set, the NUMA support attempts to dispatch the allocation of the
102 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
103 * probed ports among the CPU sockets 0 and 1.
104 * Otherwise, all memory is allocated from CPU socket 0.
106 uint8_t numa_support = 1; /**< numa enabled by default */
109 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
112 uint8_t socket_num = UMA_NO_CONFIG;
115 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
120 * Record the Ethernet address of peer target ports to which packets are
122 * Must be instantiated with the ethernet addresses of peer traffic generator
125 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
126 portid_t nb_peer_eth_addrs = 0;
129 * Probed Target Environment.
131 struct rte_port *ports; /**< For all probed ethernet ports. */
132 portid_t nb_ports; /**< Number of probed ethernet ports. */
133 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
134 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
137 * Test Forwarding Configuration.
138 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
139 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
141 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
142 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
143 portid_t nb_cfg_ports; /**< Number of configured ports. */
144 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
146 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
147 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
149 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
150 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
153 * Forwarding engines.
/* NOTE(review): most engine entries of this table are not visible here. */
155 struct fwd_engine * fwd_engines[] = {
164 #ifdef RTE_LIBRTE_IEEE1588
165 &ieee1588_fwd_engine,
170 struct fwd_config cur_fwd_config;
171 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
172 uint32_t retry_enabled;
173 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
174 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
176 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
177 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
178 * specified on command-line. */
181 * Configuration of packet segments used by the "txonly" processing engine.
183 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
184 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
185 TXONLY_DEF_PACKET_LEN,
187 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
189 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
190 /**< Split policy for packets to TX. */
192 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
193 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
195 /* current configuration is in DCB or not,0 means it is not in DCB mode */
196 uint8_t dcb_config = 0;
198 /* Whether the dcb is in testing status */
199 uint8_t dcb_test = 0;
202 * Configurable number of RX/TX queues.
204 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
205 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
208 * Configurable number of RX/TX ring descriptors.
210 #define RTE_TEST_RX_DESC_DEFAULT 128
211 #define RTE_TEST_TX_DESC_DEFAULT 512
212 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
213 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
/* Sentinel meaning "keep the PMD's own default" for the values below. */
215 #define RTE_PMD_PARAM_UNSET -1
217 * Configurable values of RX and TX ring threshold registers.
220 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
221 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
224 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
229 * Configurable value of RX free threshold.
231 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
234 * Configurable value of RX drop enable.
236 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
239 * Configurable value of TX free threshold.
241 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
244 * Configurable value of TX RS bit threshold.
246 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
249 * Configurable value of TX queue flags.
251 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
254 * Receive Side Scaling (RSS) configuration.
256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
259 * Port topology configuration
261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
264 * Avoids to flush all the RX streams before starts forwarding.
266 uint8_t no_flush_rx = 0; /* flush by default */
269 * Avoids to check link status when starting/stopping a port.
271 uint8_t no_link_check = 0; /* check by default */
274 * Enable link status change notification
276 uint8_t lsc_interrupt = 1; /* enabled by default */
279 * Enable device removal notification.
281 uint8_t rmv_interrupt = 1; /* enabled by default */
284 * NIC bypass mode configuration options.
286 #ifdef RTE_NIC_BYPASS
288 /* The NIC bypass watchdog timeout. */
289 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
293 #ifdef RTE_LIBRTE_LATENCY_STATS
296 * Set when latency stats is enabled in the commandline
298 uint8_t latencystats_enabled;
301 * Lcore ID to serive latency statistics.
303 lcoreid_t latencystats_lcore_id = -1;
308 * Ethernet device configuration.
310 struct rte_eth_rxmode rx_mode = {
311 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
313 .header_split = 0, /**< Header Split disabled. */
314 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
315 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
316 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
317 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
318 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
319 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
/* Flow director disabled by default; masks match everything (all-ones). */
322 struct rte_fdir_conf fdir_conf = {
323 .mode = RTE_FDIR_MODE_NONE,
324 .pballoc = RTE_FDIR_PBALLOC_64K,
325 .status = RTE_FDIR_REPORT_STATUS,
327 .vlan_tci_mask = 0x0,
329 .src_ip = 0xFFFFFFFF,
330 .dst_ip = 0xFFFFFFFF,
333 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
334 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
336 .src_port_mask = 0xFFFF,
337 .dst_port_mask = 0xFFFF,
338 .mac_addr_byte_mask = 0xFF,
339 .tunnel_type_mask = 1,
340 .tunnel_id_mask = 0xFFFFFFFF,
345 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
347 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
348 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
350 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
351 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
353 uint16_t nb_tx_queue_stats_mappings = 0;
354 uint16_t nb_rx_queue_stats_mappings = 0;
/* Number of CPU sockets discovered; maintained by set_default_fwd_lcores_config(). */
356 unsigned max_socket = 0;
358 /* Bitrate statistics */
359 struct rte_stats_bitrates *bitrate_data;
361 /* Forward function declarations */
362 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
363 static void check_all_ports_link_status(uint32_t port_mask);
364 static void eth_event_callback(uint8_t port_id,
365 enum rte_eth_event_type type,
369 * Check if all the ports are started.
370 * If yes, return positive value. If not, return zero.
372 static int all_ports_started(void);
375 * Setup default configuration.
/*
 * Build the default forwarding-lcore list: record every enabled lcore
 * except the master in fwd_lcores_cpuids[], and raise max_socket to the
 * highest CPU socket seen, exiting if RTE_MAX_NUMA_NODES is exceeded.
 * NOTE(review): this chunk appears truncated -- closing braces and the
 * `continue` statements for the skip conditions are not visible here.
 */
378 set_default_fwd_lcores_config(void)
382 unsigned int sock_num;
385 for (i = 0; i < RTE_MAX_LCORE; i++) {
/* socket ids are counted 1-based here so max_socket is a count */
386 sock_num = rte_lcore_to_socket_id(i) + 1;
387 if (sock_num > max_socket) {
388 if (sock_num > RTE_MAX_NUMA_NODES)
389 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
390 max_socket = sock_num;
/* skip lcores that are disabled or reserved for the command line */
392 if (!rte_lcore_is_enabled(i))
394 if (i == rte_get_master_lcore())
396 fwd_lcores_cpuids[nb_lc++] = i;
398 nb_lcores = (lcoreid_t) nb_lc;
399 nb_cfg_lcores = nb_lcores;
/*
 * Give every possible peer port a locally-administered MAC address whose
 * last byte is the port index (bytes 1-4 stay zero from static storage).
 */
404 set_def_peer_eth_addrs(void)
408 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
409 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
410 peer_eth_addrs[i].addr_bytes[5] = i;
/* Default forwarding-port list: identity mapping over all probed ports. */
415 set_default_fwd_ports_config(void)
419 for (pt_id = 0; pt_id < nb_ports; pt_id++)
420 fwd_ports_ids[pt_id] = pt_id;
422 nb_cfg_ports = nb_ports;
423 nb_fwd_ports = nb_ports;
/* Reset lcores, peer MAC addresses and ports to their default config. */
427 set_def_fwd_config(void)
429 set_default_fwd_lcores_config();
430 set_def_peer_eth_addrs();
431 set_default_fwd_ports_config();
435 * Configuration initialisation done once at init time.
/*
 * Create the mbuf pool for one socket.  Tries, in order: the Xen grant
 * allocator (when RTE_LIBRTE_PMD_XENVIRT), an anonymous-memory backed
 * pool (presumably under an mp_anon option -- the guard is not visible
 * here), and finally the plain rte_pktmbuf_pool_create() wrapper.
 * Exits the application when no pool can be created.
 * NOTE(review): this chunk appears truncated -- the mb_size declaration,
 * #else/#endif lines and several closing braces are not visible.
 */
438 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
439 unsigned int socket_id)
441 char pool_name[RTE_MEMPOOL_NAMESIZE];
442 struct rte_mempool *rte_mp = NULL;
445 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
446 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
449 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
450 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
452 #ifdef RTE_LIBRTE_PMD_XENVIRT
453 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
454 (unsigned) mb_mempool_cache,
455 sizeof(struct rte_pktmbuf_pool_private),
456 rte_pktmbuf_pool_init, NULL,
457 rte_pktmbuf_init, NULL,
461 /* if the former XEN allocation failed fall back to normal allocation */
462 if (rte_mp == NULL) {
464 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
465 mb_size, (unsigned) mb_mempool_cache,
466 sizeof(struct rte_pktmbuf_pool_private),
/* populate from anonymous mappings; free the empty pool on failure */
471 if (rte_mempool_populate_anon(rte_mp) == 0) {
472 rte_mempool_free(rte_mp);
476 rte_pktmbuf_pool_init(rte_mp, NULL);
477 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
479 /* wrapper to rte_mempool_create() */
480 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
481 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
486 if (rte_mp == NULL) {
487 rte_exit(EXIT_FAILURE,
488 "Creation of mbuf pool for socket %u failed: %s\n",
489 socket_id, rte_strerror(rte_errno));
490 } else if (verbose_level > 0) {
491 rte_mempool_dump(stdout, rte_mp);
496 * Check given socket id is valid or not with NUMA mode,
497 * if valid, return 0, else return -1
/*
 * NOTE(review): the tail of this function (the rest of the warning text,
 * the warning_once update and the return statements) is not visible in
 * this chunk.
 */
500 check_socket_id(const unsigned int socket_id)
/* warn only once per process run, and only when NUMA mode is on */
502 static int warning_once = 0;
504 if (socket_id >= max_socket) {
505 if (!warning_once && numa_support)
506 printf("Warning: NUMA should be configured manually by"
507 " using --port-numa-config and"
508 " --ring-numa-config parameters along with"
/*
 * NOTE(review): the lines below are the body of init_config(); the
 * function signature and several statements are not visible in this
 * chunk.  It allocates the per-lcore forwarding contexts, queries device
 * info for every probed port, creates the mbuf pool(s) (one per socket
 * in NUMA mode, a single pool otherwise), binds each forwarding lcore
 * to the mbuf pool of its socket, and initializes the forwarding streams.
 */
520 struct rte_port *port;
521 struct rte_mempool *mbp;
522 unsigned int nb_mbuf_per_pool;
524 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
526 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
527 /* Configuration of logical cores. */
528 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
529 sizeof(struct fwd_lcore *) * nb_lcores,
530 RTE_CACHE_LINE_SIZE);
531 if (fwd_lcores == NULL) {
532 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
533 "failed\n", nb_lcores);
535 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
536 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
537 sizeof(struct fwd_lcore),
538 RTE_CACHE_LINE_SIZE);
539 if (fwd_lcores[lc_id] == NULL) {
540 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
543 fwd_lcores[lc_id]->cpuid_idx = lc_id;
546 RTE_ETH_FOREACH_DEV(pid) {
548 rte_eth_dev_info_get(pid, &port->dev_info);
/* count ports per socket (honoring any --port-numa-config override) */
551 if (port_numa[pid] != NUMA_NO_CONFIG)
552 port_per_socket[port_numa[pid]]++;
554 uint32_t socket_id = rte_eth_dev_socket_id(pid);
556 /* if socket_id is invalid, set to 0 */
557 if (check_socket_id(socket_id) < 0)
559 port_per_socket[socket_id]++;
563 /* set flag to initialize port/queue */
564 port->need_reconfig = 1;
565 port->need_reconfig_queues = 1;
569 * Create pools of mbuf.
570 * If NUMA support is disabled, create a single pool of mbuf in
571 * socket 0 memory by default.
572 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
574 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
575 * nb_txd can be configured at run time.
577 if (param_total_num_mbufs)
578 nb_mbuf_per_pool = param_total_num_mbufs;
580 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
581 (nb_lcores * mb_mempool_cache) +
582 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
583 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
589 for (i = 0; i < max_socket; i++)
590 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
592 if (socket_num == UMA_NO_CONFIG)
593 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
595 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
602 * Records which Mbuf pool to use by each logical core, if needed.
604 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
605 mbp = mbuf_pool_find(
606 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
/* fall back to the socket-0 pool when the lcore's socket has none */
609 mbp = mbuf_pool_find(0);
610 fwd_lcores[lc_id]->mbp = mbp;
613 /* Configuration of packet forwarding streams. */
614 if (init_fwd_streams() < 0)
615 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * Re-query device info for a (possibly hot-plugged) port, remember its
 * socket id and flag it for full port/queue reconfiguration at the next
 * port start.
 */
622 reconfig(portid_t new_port_id, unsigned socket_id)
624 struct rte_port *port;
626 /* Reconfiguration of Ethernet ports. */
627 port = &ports[new_port_id];
628 rte_eth_dev_info_get(new_port_id, &port->dev_info);
630 /* set flag to initialize port/queue */
631 port->need_reconfig = 1;
632 port->need_reconfig_queues = 1;
633 port->socket_id = socket_id;
/*
 * (Re)allocate the fwd_streams[] array: validate nb_rxq/nb_txq against
 * each port's device limits, choose each port's socket id, then size the
 * stream array to nb_ports * max(nb_rxq, nb_txq), freeing any previously
 * allocated streams first.
 * NOTE(review): this chunk appears truncated -- return statements, else
 * branches and closing braces are not visible here.
 */
640 init_fwd_streams(void)
643 struct rte_port *port;
644 streamid_t sm_id, nb_fwd_streams_new;
647 /* set socket id according to numa or not */
648 RTE_ETH_FOREACH_DEV(pid) {
650 if (nb_rxq > port->dev_info.max_rx_queues) {
651 printf("Fail: nb_rxq(%d) is greater than "
652 "max_rx_queues(%d)\n", nb_rxq,
653 port->dev_info.max_rx_queues);
656 if (nb_txq > port->dev_info.max_tx_queues) {
657 printf("Fail: nb_txq(%d) is greater than "
658 "max_tx_queues(%d)\n", nb_txq,
659 port->dev_info.max_tx_queues);
/* NUMA mode: prefer an explicit --port-numa-config mapping */
663 if (port_numa[pid] != NUMA_NO_CONFIG)
664 port->socket_id = port_numa[pid];
666 port->socket_id = rte_eth_dev_socket_id(pid);
668 /* if socket_id is invalid, set to 0 */
669 if (check_socket_id(port->socket_id) < 0)
674 if (socket_num == UMA_NO_CONFIG)
677 port->socket_id = socket_num;
681 q = RTE_MAX(nb_rxq, nb_txq);
683 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
/* nothing to do when the stream count is unchanged */
686 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
687 if (nb_fwd_streams_new == nb_fwd_streams)
690 if (fwd_streams != NULL) {
691 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
692 if (fwd_streams[sm_id] == NULL)
694 rte_free(fwd_streams[sm_id]);
695 fwd_streams[sm_id] = NULL;
697 rte_free(fwd_streams);
702 nb_fwd_streams = nb_fwd_streams_new;
703 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
704 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
705 if (fwd_streams == NULL)
706 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
707 "failed\n", nb_fwd_streams);
709 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
710 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
711 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
712 if (fwd_streams[sm_id] == NULL)
713 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
720 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a summary of the recorded burst-size distribution for one
 * direction ("RX" or "TX"): the total number of bursts and the
 * percentage contributed by the most frequent burst sizes.
 */
722 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
724 unsigned int total_burst;
725 unsigned int nb_burst;
726 unsigned int burst_stats[3];
727 uint16_t pktnb_stats[3];
729 int burst_percent[3];
732 * First compute the total number of packet bursts and the
733 * two highest numbers of bursts of the same number of packets.
736 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
737 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
738 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
739 nb_burst = pbs->pkt_burst_spread[nb_pkt];
742 total_burst += nb_burst;
/* keep the most frequent burst size, demoting the previous leader */
743 if (nb_burst > burst_stats[0]) {
744 burst_stats[1] = burst_stats[0];
745 pktnb_stats[1] = pktnb_stats[0];
746 burst_stats[0] = nb_burst;
747 pktnb_stats[0] = nb_pkt;
750 if (total_burst == 0)
752 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
753 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
754 burst_percent[0], (int) pktnb_stats[0]);
755 if (burst_stats[0] == total_burst) {
759 if (burst_stats[0] + burst_stats[1] == total_burst) {
760 printf(" + %d%% of %d pkts]\n",
761 100 - burst_percent[0], pktnb_stats[1]);
764 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
765 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
/* collapse negligible remainders into a single "others" bucket */
766 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
767 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
770 printf(" + %d%% of %d pkts + %d%% of others]\n",
771 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
773 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print the per-port forwarding statistics: RX/TX packet counts, drops,
 * checksum errors (csum engine only), and -- when queue-stats mapping is
 * enabled -- the per-stats-register counters.  Two print layouts are
 * used depending on whether queue-stats mapping is enabled for the port.
 */
776 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
778 struct rte_port *port;
781 static const char *fwd_stats_border = "----------------------";
783 port = &ports[port_id];
784 printf("\n %s Forward statistics for port %-2d %s\n",
785 fwd_stats_border, port_id, fwd_stats_border);
787 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
788 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
790 stats->ipackets, stats->imissed,
791 (uint64_t) (stats->ipackets + stats->imissed));
/* checksum errors are only meaningful under the csum engine */
793 if (cur_fwd_eng == &csum_fwd_engine)
794 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
795 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
796 if ((stats->ierrors + stats->rx_nombuf) > 0) {
797 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
798 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
801 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
803 stats->opackets, port->tx_dropped,
804 (uint64_t) (stats->opackets + port->tx_dropped));
/* alternate (right-aligned) layout when queue-stats mapping is on */
807 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
809 stats->ipackets, stats->imissed,
810 (uint64_t) (stats->ipackets + stats->imissed));
812 if (cur_fwd_eng == &csum_fwd_engine)
813 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
814 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
815 if ((stats->ierrors + stats->rx_nombuf) > 0) {
816 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
817 printf(" RX-nombufs: %14"PRIu64"\n",
821 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
823 stats->opackets, port->tx_dropped,
824 (uint64_t) (stats->opackets + port->tx_dropped));
827 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
829 pkt_burst_stats_display("RX",
830 &port->rx_stream->rx_burst_stats);
832 pkt_burst_stats_display("TX",
833 &port->tx_stream->tx_burst_stats);
836 if (port->rx_queue_stats_mapping_enabled) {
838 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
839 printf(" Stats reg %2d RX-packets:%14"PRIu64
840 " RX-errors:%14"PRIu64
841 " RX-bytes:%14"PRIu64"\n",
842 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
846 if (port->tx_queue_stats_mapping_enabled) {
847 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
848 printf(" Stats reg %2d TX-packets:%14"PRIu64
849 " TX-bytes:%14"PRIu64"\n",
850 i, stats->q_opackets[i], stats->q_obytes[i]);
854 printf(" %s--------------------------------%s\n",
855 fwd_stats_border, fwd_stats_border);
/*
 * Print the statistics of one forwarding stream (RX port/queue ->
 * TX port/queue).  Streams with no activity are skipped entirely;
 * checksum counters are shown only under the csum engine.
 */
859 fwd_stream_stats_display(streamid_t stream_id)
861 struct fwd_stream *fs;
862 static const char *fwd_top_stats_border = "-------";
864 fs = fwd_streams[stream_id];
865 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
866 (fs->fwd_dropped == 0))
868 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
869 "TX Port=%2d/Queue=%2d %s\n",
870 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
871 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
872 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
873 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
875 /* if checksum mode */
876 if (cur_fwd_eng == &csum_fwd_engine) {
877 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
878 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
881 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
882 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
883 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain all RX queues of every forwarding port before starting a run, so
 * stale packets do not pollute the statistics.  Two passes are made, with
 * a 10 ms pause between them; each queue drain is bounded by a 1-second
 * TSC-based timeout in case the queue never empties.
 */
888 flush_fwd_rx_queues(void)
890 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
897 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
898 uint64_t timer_period;
900 /* convert to number of cycles */
901 timer_period = rte_get_timer_hz(); /* 1 second timeout */
903 for (j = 0; j < 2; j++) {
904 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
905 for (rxq = 0; rxq < nb_rxq; rxq++) {
906 port_id = fwd_ports_ids[rxp];
908 * testpmd can stuck in the below do while loop
909 * if rte_eth_rx_burst() always returns nonzero
910 * packets. So timer is added to exit this loop
911 * after 1sec timer expiry.
913 prev_tsc = rte_rdtsc();
915 nb_rx = rte_eth_rx_burst(port_id, rxq,
916 pkts_burst, MAX_PKT_BURST);
917 for (i = 0; i < nb_rx; i++)
918 rte_pktmbuf_free(pkts_burst[i]);
920 cur_tsc = rte_rdtsc();
921 diff_tsc = cur_tsc - prev_tsc;
922 timer_tsc += diff_tsc;
923 } while ((nb_rx > 0) &&
924 (timer_tsc < timer_period));
928 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main per-lcore forwarding loop: repeatedly apply pkt_fwd to every
 * stream assigned to this lcore until fc->stopped is set.  Optionally
 * recomputes bitrate statistics once per second (RTE_LIBRTE_BITRATE) and
 * updates latency statistics on the designated lcore
 * (RTE_LIBRTE_LATENCY_STATS).
 */
933 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
935 struct fwd_stream **fsm;
938 #ifdef RTE_LIBRTE_BITRATE
939 uint64_t tics_per_1sec;
941 uint64_t tics_current;
942 uint8_t idx_port, cnt_ports;
944 cnt_ports = rte_eth_dev_count();
945 tics_datum = rte_rdtsc();
946 tics_per_1sec = rte_get_timer_hz();
/* this lcore owns the contiguous stream slice [stream_idx, +stream_nb) */
948 fsm = &fwd_streams[fc->stream_idx];
949 nb_fs = fc->stream_nb;
951 for (sm_id = 0; sm_id < nb_fs; sm_id++)
952 (*pkt_fwd)(fsm[sm_id]);
953 #ifdef RTE_LIBRTE_BITRATE
954 tics_current = rte_rdtsc();
955 if (tics_current - tics_datum >= tics_per_1sec) {
956 /* Periodic bitrate calculation */
957 for (idx_port = 0; idx_port < cnt_ports; idx_port++)
958 rte_stats_bitrate_calc(bitrate_data, idx_port);
959 tics_datum = tics_current;
962 #ifdef RTE_LIBRTE_LATENCY_STATS
963 if (latencystats_lcore_id == rte_lcore_id())
964 rte_latencystats_update();
967 } while (! fc->stopped);
/* lcore entry point: run the currently configured forwarding engine. */
971 start_pkt_forward_on_core(void *fwd_arg)
973 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
974 cur_fwd_config.fwd_eng->packet_fwd);
979 * Run the TXONLY packet forwarding engine to send a single burst of packets.
980 * Used to start communication flows in network loopback test configurations.
983 run_one_txonly_burst_on_core(void *fwd_arg)
985 struct fwd_lcore *fwd_lc;
986 struct fwd_lcore tmp_lcore;
988 fwd_lc = (struct fwd_lcore *) fwd_arg;
/* pre-set stopped so the forwarding loop executes exactly one pass */
990 tmp_lcore.stopped = 1;
991 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
996 * Launch packet forwarding:
997 * - Setup per-port forwarding context.
998 * - launch logical cores with their forwarding configuration.
1001 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1003 port_fwd_begin_t port_fwd_begin;
1008 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1009 if (port_fwd_begin != NULL) {
1010 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1011 (*port_fwd_begin)(fwd_ports_ids[i]);
1013 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1014 lc_id = fwd_lcores_cpuids[i];
/* in interactive mode the current lcore stays on the command line */
1015 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1016 fwd_lcores[i]->stopped = 0;
1017 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1018 fwd_lcores[i], lc_id);
1020 printf("launch lcore %u failed - diag=%d\n",
1027 * Launch packet forwarding configuration.
/*
 * Validate the current engine/queue configuration, reset per-port and
 * per-stream statistics, optionally run one TXONLY warm-up burst per
 * requested repetition (with_tx_first), then launch the configured
 * forwarding engine on all forwarding lcores.
 * NOTE(review): this chunk appears truncated -- the DCB pre-checks'
 * enclosing condition, several returns and closing braces, and the
 * test_done update are not visible here.
 */
1030 start_packet_forwarding(int with_tx_first)
1032 port_fwd_begin_t port_fwd_begin;
1033 port_fwd_end_t port_fwd_end;
1034 struct rte_port *port;
/* sanity checks: the chosen engine must have queues in the direction(s) it uses */
1039 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1040 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1042 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1043 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1045 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1046 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1047 (!nb_rxq || !nb_txq))
1048 rte_exit(EXIT_FAILURE,
1049 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1050 cur_fwd_eng->fwd_mode_name);
1052 if (all_ports_started() == 0) {
1053 printf("Not all ports were started\n");
1056 if (test_done == 0) {
1057 printf("Packet forwarding already started\n");
1061 if (init_fwd_streams() < 0) {
1062 printf("Fail from init_fwd_streams()\n");
/* DCB mode pre-checks over the forwarding ports */
1067 for (i = 0; i < nb_fwd_ports; i++) {
1068 pt_id = fwd_ports_ids[i];
1069 port = &ports[pt_id];
1070 if (!port->dcb_flag) {
1071 printf("In DCB mode, all forwarding ports must "
1072 "be configured in this mode.\n");
1076 if (nb_fwd_lcores == 1) {
1077 printf("In DCB mode,the nb forwarding cores "
1078 "should be larger than 1.\n");
1085 flush_fwd_rx_queues();
1088 pkt_fwd_config_display(&cur_fwd_config);
1089 rxtx_config_display();
/* snapshot current HW stats as the baseline for this run */
1091 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1092 pt_id = fwd_ports_ids[i];
1093 port = &ports[pt_id];
1094 rte_eth_stats_get(pt_id, &port->stats);
1095 port->tx_dropped = 0;
1097 map_port_queue_stats_mapping_registers(pt_id, port);
1099 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1100 fwd_streams[sm_id]->rx_packets = 0;
1101 fwd_streams[sm_id]->tx_packets = 0;
1102 fwd_streams[sm_id]->fwd_dropped = 0;
1103 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1104 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1106 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1107 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1108 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1109 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1110 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1112 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1113 fwd_streams[sm_id]->core_cycles = 0;
1116 if (with_tx_first) {
1117 port_fwd_begin = tx_only_engine.port_fwd_begin;
1118 if (port_fwd_begin != NULL) {
1119 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1120 (*port_fwd_begin)(fwd_ports_ids[i]);
1122 while (with_tx_first--) {
1123 launch_packet_forwarding(
1124 run_one_txonly_burst_on_core);
1125 rte_eal_mp_wait_lcore();
1127 port_fwd_end = tx_only_engine.port_fwd_end;
1128 if (port_fwd_end != NULL) {
1129 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1130 (*port_fwd_end)(fwd_ports_ids[i]);
1133 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop all forwarding lcores, wait for them to finish, then aggregate
 * per-stream statistics into the per-port counters and print per-port
 * plus accumulated statistics (delta against the baseline snapshot taken
 * at start_packet_forwarding()).
 * NOTE(review): this chunk appears truncated -- variable declarations
 * (i, pt_id, sm_id, lc_id), some else branches, the early-return guard
 * body and the test_done update are not visible here.
 */
1137 stop_packet_forwarding(void)
1139 struct rte_eth_stats stats;
1140 struct rte_port *port;
1141 port_fwd_end_t port_fwd_end;
1146 uint64_t total_recv;
1147 uint64_t total_xmit;
1148 uint64_t total_rx_dropped;
1149 uint64_t total_tx_dropped;
1150 uint64_t total_rx_nombuf;
1151 uint64_t tx_dropped;
1152 uint64_t rx_bad_ip_csum;
1153 uint64_t rx_bad_l4_csum;
1154 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1155 uint64_t fwd_cycles;
1157 static const char *acc_stats_border = "+++++++++++++++";
1160 printf("Packet forwarding not started\n");
1163 printf("Telling cores to stop...");
1164 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1165 fwd_lcores[lc_id]->stopped = 1;
1166 printf("\nWaiting for lcores to finish...\n");
1167 rte_eal_mp_wait_lcore();
1168 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1169 if (port_fwd_end != NULL) {
1170 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1171 pt_id = fwd_ports_ids[i];
1172 (*port_fwd_end)(pt_id);
1175 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* fold each stream's counters into its RX/TX port's accumulators */
1178 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1179 if (cur_fwd_config.nb_fwd_streams >
1180 cur_fwd_config.nb_fwd_ports) {
1181 fwd_stream_stats_display(sm_id);
1182 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1183 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1185 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1187 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1190 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1191 tx_dropped = (uint64_t) (tx_dropped +
1192 fwd_streams[sm_id]->fwd_dropped);
1193 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1196 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1197 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1198 fwd_streams[sm_id]->rx_bad_ip_csum);
1199 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1203 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1204 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1205 fwd_streams[sm_id]->rx_bad_l4_csum);
1206 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1209 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1210 fwd_cycles = (uint64_t) (fwd_cycles +
1211 fwd_streams[sm_id]->core_cycles);
1216 total_rx_dropped = 0;
1217 total_tx_dropped = 0;
1218 total_rx_nombuf = 0;
/* per-port pass: subtract the start-of-run baseline, then print */
1219 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1220 pt_id = fwd_ports_ids[i];
1222 port = &ports[pt_id];
1223 rte_eth_stats_get(pt_id, &stats);
1224 stats.ipackets -= port->stats.ipackets;
1225 port->stats.ipackets = 0;
1226 stats.opackets -= port->stats.opackets;
1227 port->stats.opackets = 0;
1228 stats.ibytes -= port->stats.ibytes;
1229 port->stats.ibytes = 0;
1230 stats.obytes -= port->stats.obytes;
1231 port->stats.obytes = 0;
1232 stats.imissed -= port->stats.imissed;
1233 port->stats.imissed = 0;
1234 stats.oerrors -= port->stats.oerrors;
1235 port->stats.oerrors = 0;
1236 stats.rx_nombuf -= port->stats.rx_nombuf;
1237 port->stats.rx_nombuf = 0;
1239 total_recv += stats.ipackets;
1240 total_xmit += stats.opackets;
1241 total_rx_dropped += stats.imissed;
1242 total_tx_dropped += port->tx_dropped;
1243 total_rx_nombuf += stats.rx_nombuf;
1245 fwd_port_stats_display(pt_id, &stats);
1247 printf("\n %s Accumulated forward statistics for all ports"
1249 acc_stats_border, acc_stats_border);
1250 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1252 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1254 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1255 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1256 if (total_rx_nombuf > 0)
1257 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1258 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1260 acc_stats_border, acc_stats_border);
1261 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1263 printf("\n CPU cycles/packet=%u (total cycles="
1264 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1265 (unsigned int)(fwd_cycles / total_recv),
1266 fwd_cycles, total_recv);
1268 printf("\nDone.\n");
/*
 * Administratively bring the link of port `pid` up via
 * rte_eth_dev_set_link_up(); on failure only a message is printed.
 * NOTE(review): extracted fragment — return type and braces of the
 * original are not visible in this chunk.
 */
1273 dev_set_link_up(portid_t pid)
1275 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1276 printf("\nSet link up fail.\n");
/*
 * Administratively bring the link of port `pid` down via
 * rte_eth_dev_set_link_down(); on failure only a message is printed.
 * NOTE(review): extracted fragment — return type and braces of the
 * original are not visible in this chunk.
 */
1280 dev_set_link_down(portid_t pid)
1282 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1283 printf("\nSet link down fail.\n");
/*
 * Check whether every probed port is in RTE_PORT_STARTED state.
 * Ports marked as bonding slaves (slave_flag != 0) are excluded from
 * the check, per the visible condition below.
 * NOTE(review): fragment — the early-return and final return lines of
 * the original are missing from this extract.
 */
1287 all_ports_started(void)
1290 struct rte_port *port;
1292 RTE_ETH_FOREACH_DEV(pi) {
1294 /* Check if there is a port which is not started */
1295 if ((port->port_status != RTE_PORT_STARTED) &&
1296 (port->slave_flag == 0))
1300 /* No port is not started */
/*
 * Check whether every probed port is in RTE_PORT_STOPPED state,
 * again skipping bonding-slave ports (slave_flag != 0).
 * NOTE(review): fragment — return statements of the original are not
 * visible in this extract.
 */
1305 all_ports_stopped(void)
1308 struct rte_port *port;
1310 RTE_ETH_FOREACH_DEV(pi) {
1312 if ((port->port_status != RTE_PORT_STOPPED) &&
1313 (port->slave_flag == 0))
/*
 * Predicate: is `port_id` valid and currently in RTE_PORT_STARTED
 * state?  An invalid id triggers a warning via ENABLED_WARN.
 * NOTE(review): fragment — the return lines are not visible here.
 */
1321 port_is_started(portid_t port_id)
1323 if (port_id_is_invalid(port_id, ENABLED_WARN))
1326 if (ports[port_id].port_status != RTE_PORT_STARTED)
/*
 * Predicate: is `port_id` valid and currently in RTE_PORT_CLOSED
 * state?  Used by detach_port() to refuse detaching an open port.
 * NOTE(review): fragment — the return lines are not visible here.
 */
1333 port_is_closed(portid_t port_id)
1335 if (port_id_is_invalid(port_id, ENABLED_WARN))
1338 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * Start port `pid`, or all ports when pid == RTE_PORT_ALL.
 *
 * For each matched port this (per the visible code): moves the port
 * state STOPPED -> HANDLING with an atomic compare-and-set, optionally
 * reconfigures the device (rte_eth_dev_configure) and its TX/RX queues
 * (honouring per-port NUMA ring configuration and per-socket mbuf
 * pools), registers callbacks for every ethdev event type, starts the
 * device, then publishes state HANDLING -> STARTED and prints the MAC.
 * On any failure the state is rolled back to STOPPED and the relevant
 * need_reconfig* flag is re-armed so the next start retries the setup.
 *
 * need_check_link_status: -1 = no port matched, 0 = a matched port was
 * not stopped, 1 = at least one port started (triggers link check).
 *
 * NOTE(review): extracted fragment — many original lines (braces,
 * continue/return statements, loop increments) are missing from this
 * chunk; only visible tokens were reviewed.
 */
1345 start_port(portid_t pid)
1347 int diag, need_check_link_status = -1;
1350 struct rte_port *port;
1351 struct ether_addr mac_addr;
1352 enum rte_eth_event_type event_type;
1354 if (port_id_is_invalid(pid, ENABLED_WARN))
1359 RTE_ETH_FOREACH_DEV(pi) {
1360 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1363 need_check_link_status = 0;
1365 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1366 RTE_PORT_HANDLING) == 0) {
1367 printf("Port %d is now not stopped\n", pi);
1371 if (port->need_reconfig > 0) {
1372 port->need_reconfig = 0;
1374 printf("Configuring Port %d (socket %u)\n", pi,
1376 /* configure port */
1377 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1380 if (rte_atomic16_cmpset(&(port->port_status),
1381 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1382 printf("Port %d can not be set back "
1383 "to stopped\n", pi);
1384 printf("Fail to configure port %d\n", pi);
1385 /* try to reconfigure port next time */
1386 port->need_reconfig = 1;
1390 if (port->need_reconfig_queues > 0) {
1391 port->need_reconfig_queues = 0;
1392 /* setup tx queues */
1393 for (qi = 0; qi < nb_txq; qi++) {
1394 if ((numa_support) &&
1395 (txring_numa[pi] != NUMA_NO_CONFIG))
1396 diag = rte_eth_tx_queue_setup(pi, qi,
1397 nb_txd,txring_numa[pi],
1400 diag = rte_eth_tx_queue_setup(pi, qi,
1401 nb_txd,port->socket_id,
1407 /* Fail to setup tx queue, return */
1408 if (rte_atomic16_cmpset(&(port->port_status),
1410 RTE_PORT_STOPPED) == 0)
1411 printf("Port %d can not be set back "
1412 "to stopped\n", pi);
1413 printf("Fail to configure port %d tx queues\n", pi);
1414 /* try to reconfigure queues next time */
1415 port->need_reconfig_queues = 1;
1418 /* setup rx queues */
1419 for (qi = 0; qi < nb_rxq; qi++) {
1420 if ((numa_support) &&
1421 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1422 struct rte_mempool * mp =
1423 mbuf_pool_find(rxring_numa[pi]);
1425 printf("Failed to setup RX queue:"
1426 "No mempool allocation"
1427 " on the socket %d\n",
1432 diag = rte_eth_rx_queue_setup(pi, qi,
1433 nb_rxd,rxring_numa[pi],
1434 &(port->rx_conf),mp);
1436 struct rte_mempool *mp =
1437 mbuf_pool_find(port->socket_id);
1439 printf("Failed to setup RX queue:"
1440 "No mempool allocation"
1441 " on the socket %d\n",
1445 diag = rte_eth_rx_queue_setup(pi, qi,
1446 nb_rxd,port->socket_id,
1447 &(port->rx_conf), mp);
1452 /* Fail to setup rx queue, return */
1453 if (rte_atomic16_cmpset(&(port->port_status),
1455 RTE_PORT_STOPPED) == 0)
1456 printf("Port %d can not be set back "
1457 "to stopped\n", pi);
1458 printf("Fail to configure port %d rx queues\n", pi);
1459 /* try to reconfigure queues next time */
1460 port->need_reconfig_queues = 1;
1465 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1466 event_type < RTE_ETH_EVENT_MAX;
1468 diag = rte_eth_dev_callback_register(pi,
1473 printf("Failed to setup event callback for event %d\n",
1480 if (rte_eth_dev_start(pi) < 0) {
1481 printf("Fail to start port %d\n", pi);
1483 /* Fail to start the device: roll state back to stopped */
1484 if (rte_atomic16_cmpset(&(port->port_status),
1485 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1486 printf("Port %d can not be set back to "
1491 if (rte_atomic16_cmpset(&(port->port_status),
1492 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1493 printf("Port %d can not be set into started\n", pi);
1495 rte_eth_macaddr_get(pi, &mac_addr);
1496 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1497 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1498 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1499 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1501 /* at least one port started, need checking link status */
1502 need_check_link_status = 1;
1505 if (need_check_link_status == 1 && !no_link_check)
1506 check_all_ports_link_status(RTE_PORT_ALL);
1507 else if (need_check_link_status == 0)
1508 printf("Please stop the ports first\n");
/*
 * Stop port `pid`, or all ports when pid == RTE_PORT_ALL.
 *
 * Per the visible code: refuses to stop a port that is still part of
 * the forwarding configuration while a test is running, or one that is
 * a bonding slave.  State moves STARTED -> HANDLING -> STOPPED around
 * rte_eth_dev_stop(); a link-status check runs afterwards unless
 * disabled by no_link_check.
 *
 * NOTE(review): extracted fragment — braces and continue/return lines
 * of the original are missing from this chunk.
 */
1515 stop_port(portid_t pid)
1518 struct rte_port *port;
1519 int need_check_link_status = 0;
1526 if (port_id_is_invalid(pid, ENABLED_WARN))
1529 printf("Stopping ports...\n");
1531 RTE_ETH_FOREACH_DEV(pi) {
1532 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1535 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1536 printf("Please remove port %d from forwarding configuration.\n", pi);
1540 if (port_is_bonding_slave(pi)) {
1541 printf("Please remove port %d from bonded device.\n", pi);
1546 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1547 RTE_PORT_HANDLING) == 0)
1550 rte_eth_dev_stop(pi);
1552 if (rte_atomic16_cmpset(&(port->port_status),
1553 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1554 printf("Port %d can not be set into stopped\n", pi);
1555 need_check_link_status = 1;
1557 if (need_check_link_status && !no_link_check)
1558 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Close port `pid`, or all ports when pid == RTE_PORT_ALL.
 *
 * Per the visible code: skips ports still in the forwarding config or
 * acting as bonding slaves; skips already-closed ports; flushes the
 * port's flow rules (port_flow_flush) before rte_eth_dev_close();
 * state moves STOPPED -> HANDLING -> CLOSED via atomic compare-and-set.
 *
 * NOTE(review): extracted fragment — braces and continue lines of the
 * original are missing from this chunk.
 */
1564 close_port(portid_t pid)
1567 struct rte_port *port;
1569 if (port_id_is_invalid(pid, ENABLED_WARN))
1572 printf("Closing ports...\n");
1574 RTE_ETH_FOREACH_DEV(pi) {
1575 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1578 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1579 printf("Please remove port %d from forwarding configuration.\n", pi);
1583 if (port_is_bonding_slave(pi)) {
1584 printf("Please remove port %d from bonded device.\n", pi);
1589 if (rte_atomic16_cmpset(&(port->port_status),
1590 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1591 printf("Port %d is already closed\n", pi);
1595 if (rte_atomic16_cmpset(&(port->port_status),
1596 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1597 printf("Port %d is now not stopped\n", pi);
1601 if (port->flow_list)
1602 port_flow_flush(pi);
1603 rte_eth_dev_close(pi);
1605 if (rte_atomic16_cmpset(&(port->port_status),
1606 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1607 printf("Port %d cannot be set to closed\n", pi);
/*
 * Hot-plug a new port described by `identifier` (device argument
 * string) via rte_eth_dev_attach().  On success the port is
 * reconfigured for its (validated) NUMA socket, promiscuous mode is
 * enabled, nb_ports is refreshed, and the port is left in
 * RTE_PORT_STOPPED state.
 *
 * NOTE(review): extracted fragment — return statements and braces of
 * the original are missing from this chunk.
 */
1614 attach_port(char *identifier)
1617 unsigned int socket_id;
1619 printf("Attaching a new port...\n");
1621 if (identifier == NULL) {
1622 printf("Invalid parameters are specified\n");
1626 if (rte_eth_dev_attach(identifier, &pi))
1629 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1630 /* if socket_id is invalid, set to 0 */
1631 if (check_socket_id(socket_id) < 0)
1633 reconfig(pi, socket_id);
1634 rte_eth_promiscuous_enable(pi);
1636 nb_ports = rte_eth_dev_count();
1638 ports[pi].port_status = RTE_PORT_STOPPED;
1640 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Hot-unplug `port_id` via rte_eth_dev_detach().  The port must be
 * closed first (checked with port_is_closed()); any remaining flow
 * rules are flushed before detaching, and nb_ports is refreshed.
 *
 * NOTE(review): extracted fragment — return statements and braces of
 * the original are missing from this chunk.
 */
1645 detach_port(uint8_t port_id)
1647 char name[RTE_ETH_NAME_MAX_LEN];
1649 printf("Detaching a port...\n");
1651 if (!port_is_closed(port_id)) {
1652 printf("Please close port first\n");
1656 if (ports[port_id].flow_list)
1657 port_flow_flush(port_id);
1659 if (rte_eth_dev_detach(port_id, name))
1662 nb_ports = rte_eth_dev_count();
1664 printf("Port '%s' is detached. Now total ports is %d\n",
/*
 * NOTE(review): fragment of the application shutdown path (the
 * function header is not visible in this extract — presumably
 * pmd_test_exit; confirm against the full file).  Visible behavior:
 * stop forwarding, then iterate over all ports shutting each down.
 */
1676 stop_packet_forwarding();
1678 if (ports != NULL) {
1680 RTE_ETH_FOREACH_DEV(pt_id) {
1681 printf("\nShutting down port %d...\n", pt_id);
1687 printf("\nBye...\n");
/* Signature of a parameterless test-menu command handler. */
1690 typedef void (*cmd_func_t)(void);
/* One entry of the PMD test command menu: name + handler. */
1691 struct pmd_test_command {
1692 const char *cmd_name;
1693 cmd_func_t cmd_func;
/* Number of entries in pmd_test_menu (array defined elsewhere). */
1696 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/*
 * Poll the link status of every port selected by `port_mask`, at
 * CHECK_INTERVAL ms intervals for at most MAX_CHECK_TIME iterations
 * (~9s).  Once all selected links are up — or on timeout — the final
 * status of each port is printed and the function returns.
 *
 * Fix: the half-duplex string carried a stray "\n"; the format string
 * on the "Link Up" line already ends with "%s\n", so the message was
 * double-terminated for half-duplex links.
 *
 * NOTE(review): extracted fragment — braces, continue/break lines and
 * the all_ports_up initialisation are missing from this chunk.
 */
1698 /* Check the link status of all ports in up to 9s, and print them finally */
1700 check_all_ports_link_status(uint32_t port_mask)
1702 #define CHECK_INTERVAL 100 /* 100ms */
1703 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1704 uint8_t portid, count, all_ports_up, print_flag = 0;
1705 struct rte_eth_link link;
1707 printf("Checking link statuses...\n");
1709 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1711 RTE_ETH_FOREACH_DEV(portid) {
1712 if ((port_mask & (1 << portid)) == 0)
1714 memset(&link, 0, sizeof(link));
1715 rte_eth_link_get_nowait(portid, &link);
1716 /* print link status if flag set */
1717 if (print_flag == 1) {
1718 if (link.link_status)
1719 printf("Port %d Link Up - speed %u "
1720 "Mbps - %s\n", (uint8_t)portid,
1721 (unsigned)link.link_speed,
1722 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1723 ("full-duplex") : ("half-duplex"));
1725 printf("Port %d Link Down\n",
1729 /* clear all_ports_up flag if any link down */
1730 if (link.link_status == ETH_LINK_DOWN) {
1735 /* after finally printing all link status, get out */
1736 if (print_flag == 1)
1739 if (all_ports_up == 0) {
1741 rte_delay_ms(CHECK_INTERVAL);
1744 /* set the print_flag if all ports up or timeout */
1745 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred handler for a device-removal (RMV) interrupt, scheduled
 * from eth_event_callback() via rte_eal_alarm_set().  `arg` carries
 * the port id.  Closes the port, reconstructs the device name from
 * its devargs (virtual vs. whitelisted-PCI), detaches the device and
 * marks the ethdev slot unused.
 *
 * NOTE(review): extracted fragment — the declaration of `name` and
 * some braces are missing from this chunk.
 */
1755 rmv_event_callback(void *arg)
1757 struct rte_eth_dev *dev;
1758 struct rte_devargs *da;
1760 uint8_t port_id = (intptr_t)arg;
1762 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1763 dev = &rte_eth_devices[port_id];
1764 da = dev->device->devargs;
1767 close_port(port_id);
1768 if (da->type == RTE_DEVTYPE_VIRTUAL)
1769 snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1770 else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1771 rte_eal_pci_device_name(&da->pci.addr, name, sizeof(name));
1772 printf("removing device %s\n", name);
1773 rte_eal_dev_detach(name);
1774 dev->state = RTE_ETH_DEV_UNUSED;
1777 /* This function is used by the interrupt thread */
/*
 * Generic ethdev event callback: logs the event by name (rejecting
 * out-of-range event types), and for RTE_ETH_EVENT_INTR_RMV defers
 * the actual device removal to rmv_event_callback() via a 100 ms EAL
 * alarm (removal must not run in interrupt context).
 *
 * NOTE(review): extracted fragment — the switch statement's opening
 * and closing lines are missing from this chunk.
 */
1779 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
1781 static const char * const event_desc[] = {
1782 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1783 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1784 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1785 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1786 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1787 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1788 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1789 [RTE_ETH_EVENT_MAX] = NULL,
1792 RTE_SET_USED(param);
1794 if (type >= RTE_ETH_EVENT_MAX) {
1795 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1796 port_id, __func__, type);
1799 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1805 case RTE_ETH_EVENT_INTR_RMV:
1806 if (rte_eal_alarm_set(100000,
1807 rmv_event_callback, (void *)(intptr_t)port_id))
1808 fprintf(stderr, "Could not set up deferred device removal\n");
/*
 * Program the NIC's TX queue -> stats counter mapping registers for
 * `port_id` from the global tx_queue_stats_mappings table, skipping
 * entries whose queue id exceeds nb_txq.  Marks the port's
 * tx_queue_stats_mapping_enabled flag on success.
 *
 * NOTE(review): extracted fragment — the diag error-return handling
 * and loop braces are missing from this chunk.
 */
1816 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1820 uint8_t mapping_found = 0;
1822 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1823 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1824 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1825 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1826 tx_queue_stats_mappings[i].queue_id,
1827 tx_queue_stats_mappings[i].stats_counter_id);
1834 port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX-side counterpart of set_tx_queue_stats_mapping_registers():
 * programs RX queue -> stats counter mappings for `port_id` from the
 * global rx_queue_stats_mappings table (entries bounded by nb_rxq),
 * and marks rx_queue_stats_mapping_enabled on success.
 *
 * NOTE(review): extracted fragment — the diag error-return handling
 * and loop braces are missing from this chunk.
 */
1839 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1843 uint8_t mapping_found = 0;
1845 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1846 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1847 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1848 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1849 rx_queue_stats_mappings[i].queue_id,
1850 rx_queue_stats_mappings[i].stats_counter_id);
/*
 * Apply both TX and RX queue-stats mappings to port `pi`.
 * -ENOTSUP from either helper is tolerated (the feature is simply
 * disabled and a notice printed); any other error is fatal and
 * terminates testpmd via rte_exit().
 *
 * NOTE(review): extracted fragment — parts of the error branches are
 * missing from this chunk.
 */
1862 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1866 diag = set_tx_queue_stats_mapping_registers(pi, port);
1868 if (diag == -ENOTSUP) {
1869 port->tx_queue_stats_mapping_enabled = 0;
1870 printf("TX queue stats mapping not supported port id=%d\n", pi);
1873 rte_exit(EXIT_FAILURE,
1874 "set_tx_queue_stats_mapping_registers "
1875 "failed for port id=%d diag=%d\n",
1879 diag = set_rx_queue_stats_mapping_registers(pi, port);
1881 if (diag == -ENOTSUP) {
1882 port->rx_queue_stats_mapping_enabled = 0;
1883 printf("RX queue stats mapping not supported port id=%d\n", pi);
1886 rte_exit(EXIT_FAILURE,
1887 "set_rx_queue_stats_mapping_registers "
1888 "failed for port id=%d diag=%d\n",
/*
 * Initialise a port's RX/TX queue configuration from the driver's
 * defaults (dev_info.default_rxconf/txconf), then override each
 * threshold/flag with the corresponding command-line value when it
 * was explicitly set (i.e. differs from RTE_PMD_PARAM_UNSET).
 */
1894 rxtx_port_config(struct rte_port *port)
1896 port->rx_conf = port->dev_info.default_rxconf;
1897 port->tx_conf = port->dev_info.default_txconf;
1899 /* Check if any RX/TX parameters have been passed */
1900 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1901 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1903 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1904 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1906 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1907 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1909 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1910 port->rx_conf.rx_free_thresh = rx_free_thresh;
1912 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1913 port->rx_conf.rx_drop_en = rx_drop_en;
1915 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1916 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1918 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1919 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1921 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1922 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1924 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1925 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1927 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1928 port->tx_conf.tx_free_thresh = tx_free_thresh;
1930 if (txq_flags != RTE_PMD_PARAM_UNSET)
1931 port->tx_conf.txq_flags = txq_flags;
/*
 * Build the default per-port device configuration for every probed
 * port: RX mode, flow-director config, RSS key/hash (disabled when
 * only one queue — the visible branches set rss_hf vs. 0), multi-queue
 * mode (RSS or none, unless DCB is active), queue thresholds via
 * rxtx_port_config(), MAC address, stats mappings, optional bypass
 * init, and LSC/RMV interrupt enables when the device advertises them.
 *
 * NOTE(review): extracted fragment — the condition selecting between
 * the two rss_conf branches and some braces are missing here.
 */
1935 init_port_config(void)
1938 struct rte_port *port;
1940 RTE_ETH_FOREACH_DEV(pid) {
1942 port->dev_conf.rxmode = rx_mode;
1943 port->dev_conf.fdir_conf = fdir_conf;
1945 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1946 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1948 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1949 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1952 if (port->dcb_flag == 0) {
1953 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1954 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1956 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1959 rxtx_port_config(port);
1961 rte_eth_macaddr_get(pid, &port->eth_addr);
1963 map_port_queue_stats_mapping_registers(pid, port);
1964 #ifdef RTE_NIC_BYPASS
1965 rte_eth_dev_bypass_init(pid);
1968 if (lsc_interrupt &&
1969 (rte_eth_devices[pid].data->dev_flags &
1970 RTE_ETH_DEV_INTR_LSC))
1971 port->dev_conf.intr_conf.lsc = 1;
1972 if (rmv_interrupt &&
1973 (rte_eth_devices[pid].data->dev_flags &
1974 RTE_ETH_DEV_INTR_RMV))
1975 port->dev_conf.intr_conf.rmv = 1;
/* Mark port `slave_pid` as a bonding slave (excluded from start/stop
 * state checks in all_ports_started()/all_ports_stopped()). */
1979 void set_port_slave_flag(portid_t slave_pid)
1981 struct rte_port *port;
1983 port = &ports[slave_pid];
1984 port->slave_flag = 1;
/* Clear the bonding-slave mark on port `slave_pid`. */
1987 void clear_port_slave_flag(portid_t slave_pid)
1989 struct rte_port *port;
1991 port = &ports[slave_pid];
1992 port->slave_flag = 0;
/* Return non-zero when port `slave_pid` is marked as a bonding slave. */
1995 uint8_t port_is_bonding_slave(portid_t slave_pid)
1997 struct rte_port *port;
1999 port = &ports[slave_pid];
2000 return port->slave_flag;
/* VLAN tags (0..31) used to populate the VMDQ+DCB pool map in
 * get_eth_dcb_conf(); one tag per possible queue pool. */
2003 const uint16_t vlan_tags[] = {
2004 0, 1, 2, 3, 4, 5, 6, 7,
2005 8, 9, 10, 11, 12, 13, 14, 15,
2006 16, 17, 18, 19, 20, 21, 22, 23,
2007 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill `eth_conf` for DCB operation.
 *
 * DCB_VT_ENABLED: configure VMDQ+DCB on both RX and TX — pool count
 * derived from the number of TCs (visible ternary maps 4 TCs to 32
 * pools, otherwise 16), VLAN-tag-to-pool map built from vlan_tags[],
 * identity TC mapping; mq_mode set to ETH_MQ_RX/TX_VMDQ_DCB.
 * Otherwise: plain DCB(+RSS) with TCs assigned round-robin over the
 * user priorities; mq_mode ETH_MQ_RX_DCB_RSS / ETH_MQ_TX_DCB.
 * PFC support is advertised in dcb_capability_en when enabled,
 * otherwise only priority-group support.
 *
 * NOTE(review): extracted fragment — the pfc_en parameter line, the
 * VT ternary on pool count and some braces are missing; the displayed
 * "ð_conf" tokens are HTML-mangled "&eth_conf" from extraction.
 */
2011 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2012 enum dcb_mode_enable dcb_mode,
2013 enum rte_eth_nb_tcs num_tcs,
2019 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2020 * given above, and the number of traffic classes available for use.
2022 if (dcb_mode == DCB_VT_ENABLED) {
2023 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2024 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2025 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2026 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2028 /* VMDQ+DCB RX and TX configurations */
2029 vmdq_rx_conf->enable_default_pool = 0;
2030 vmdq_rx_conf->default_pool = 0;
2031 vmdq_rx_conf->nb_queue_pools =
2032 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2033 vmdq_tx_conf->nb_queue_pools =
2034 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2036 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2037 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2038 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2039 vmdq_rx_conf->pool_map[i].pools =
2040 1 << (i % vmdq_rx_conf->nb_queue_pools);
2042 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2043 vmdq_rx_conf->dcb_tc[i] = i;
2044 vmdq_tx_conf->dcb_tc[i] = i;
2047 /* set DCB mode of RX and TX of multiple queues */
2048 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2049 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2051 struct rte_eth_dcb_rx_conf *rx_conf =
2052 ð_conf->rx_adv_conf.dcb_rx_conf;
2053 struct rte_eth_dcb_tx_conf *tx_conf =
2054 ð_conf->tx_adv_conf.dcb_tx_conf;
2056 rx_conf->nb_tcs = num_tcs;
2057 tx_conf->nb_tcs = num_tcs;
2059 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2060 rx_conf->dcb_tc[i] = i % num_tcs;
2061 tx_conf->dcb_tc[i] = i % num_tcs;
2063 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2064 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2065 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2069 eth_conf->dcb_capability_en =
2070 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2072 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * Switch port `pid` into DCB mode.
 *
 * Per the visible code: builds a DCB rte_eth_conf via
 * get_eth_dcb_conf(), enables HW VLAN filtering, configures the
 * device with 0 RX/TX queues (queues are set up later), then derives
 * nb_rxq/nb_txq from dev_info depending on whether VT is enabled and
 * whether VMDQ pools start after PF queues.  Rejects VMDQ_DCB when
 * vmdq_pool_base > 0.  Finally re-applies queue config, installs the
 * vlan_tags[] filters, refreshes the MAC/stats mappings and sets the
 * port's dcb_flag.  rx_free_thresh is forced to 64 (visible constant;
 * rationale not shown in this extract).
 *
 * NOTE(review): extracted fragment — the pfc_en parameter, retval
 * checks and return statements are missing from this chunk.
 */
2078 init_port_dcb_config(portid_t pid,
2079 enum dcb_mode_enable dcb_mode,
2080 enum rte_eth_nb_tcs num_tcs,
2083 struct rte_eth_conf port_conf;
2084 struct rte_port *rte_port;
2088 rte_port = &ports[pid];
2090 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2091 /* Enter DCB configuration status */
2094 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2095 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2098 port_conf.rxmode.hw_vlan_filter = 1;
2101 * Write the configuration into the device.
2102 * Set the numbers of RX & TX queues to 0, so
2103 * the RX & TX queues will not be setup.
2105 (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2107 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2109 /* If dev_info.vmdq_pool_base is greater than 0,
2110 * the queue id of vmdq pools is started after pf queues.
2112 if (dcb_mode == DCB_VT_ENABLED &&
2113 rte_port->dev_info.vmdq_pool_base > 0) {
2114 printf("VMDQ_DCB multi-queue mode is nonsensical"
2115 " for port %d.", pid);
2119 /* Assume the ports in testpmd have the same dcb capability
2120 * and has the same number of rxq and txq in dcb mode
2122 if (dcb_mode == DCB_VT_ENABLED) {
2123 if (rte_port->dev_info.max_vfs > 0) {
2124 nb_rxq = rte_port->dev_info.nb_rx_queues;
2125 nb_txq = rte_port->dev_info.nb_tx_queues;
2127 nb_rxq = rte_port->dev_info.max_rx_queues;
2128 nb_txq = rte_port->dev_info.max_tx_queues;
2131 /*if vt is disabled, use all pf queues */
2132 if (rte_port->dev_info.vmdq_pool_base == 0) {
2133 nb_rxq = rte_port->dev_info.max_rx_queues;
2134 nb_txq = rte_port->dev_info.max_tx_queues;
2136 nb_rxq = (queueid_t)num_tcs;
2137 nb_txq = (queueid_t)num_tcs;
2141 rx_free_thresh = 64;
2143 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2145 rxtx_port_config(rte_port);
2147 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2148 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2149 rx_vft_set(pid, vlan_tags[i], 1);
2151 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2152 map_port_queue_stats_mapping_registers(pid, rte_port);
2154 rte_port->dcb_flag = 1;
/*
 * NOTE(review): fragment of port-array initialisation (enclosing
 * function header is not visible — presumably init_port; confirm
 * against the full file).  Allocates the zeroed, cache-aligned array
 * of RTE_MAX_ETHPORTS rte_port structs; allocation failure is fatal.
 */
2162 /* Configuration of Ethernet ports. */
2163 ports = rte_zmalloc("testpmd: ports",
2164 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2165 RTE_CACHE_LINE_SIZE);
2166 if (ports == NULL) {
2167 rte_exit(EXIT_FAILURE,
2168 "rte_zmalloc(%d struct rte_port) failed\n",
/*
 * SIGINT/SIGTERM handler: tears down optional subsystems (pdump
 * capture, latency stats) then re-raises the signal with the default
 * disposition so the process exits with the conventional
 * signal-terminated status.
 *
 * NOTE(review): extracted fragment — #endif lines and the
 * rte_pdump_uninit() call are missing from this chunk.
 */
2181 signal_handler(int signum)
2183 if (signum == SIGINT || signum == SIGTERM) {
2184 printf("\nSignal %d received, preparing to exit...\n",
2186 #ifdef RTE_LIBRTE_PDUMP
2187 /* uninitialize packet capture framework */
2190 #ifdef RTE_LIBRTE_LATENCY_STATS
2191 rte_latencystats_uninit();
2194 /* exit with the expected status */
2195 signal(signum, SIG_DFL);
2196 kill(getpid(), signum);
2201 main(int argc, char** argv)
2206 signal(SIGINT, signal_handler);
2207 signal(SIGTERM, signal_handler);
2209 diag = rte_eal_init(argc, argv);
2211 rte_panic("Cannot init EAL\n");
2213 #ifdef RTE_LIBRTE_PDUMP
2214 /* initialize packet capture framework */
2215 rte_pdump_init(NULL);
2218 nb_ports = (portid_t) rte_eth_dev_count();
2220 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2222 /* allocate port structures, and init them */
2225 set_def_fwd_config();
2227 rte_panic("Empty set of forwarding logical cores - check the "
2228 "core mask supplied in the command parameters\n");
2233 launch_args_parse(argc, argv);
2235 if (!nb_rxq && !nb_txq)
2236 printf("Warning: Either rx or tx queues should be non-zero\n");
2238 if (nb_rxq > 1 && nb_rxq > nb_txq)
2239 printf("Warning: nb_rxq=%d enables RSS configuration, "
2240 "but nb_txq=%d will prevent to fully test it.\n",
2244 if (start_port(RTE_PORT_ALL) != 0)
2245 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2247 /* set all ports to promiscuous mode by default */
2248 RTE_ETH_FOREACH_DEV(port_id)
2249 rte_eth_promiscuous_enable(port_id);
2251 /* Init metrics library */
2252 rte_metrics_init(rte_socket_id());
2254 #ifdef RTE_LIBRTE_LATENCY_STATS
2255 if (latencystats_enabled != 0) {
2256 int ret = rte_latencystats_init(1, NULL);
2258 printf("Warning: latencystats init()"
2259 " returned error %d\n", ret);
2260 printf("Latencystats running on lcore %d\n",
2261 latencystats_lcore_id);
2265 /* Setup bitrate stats */
2266 #ifdef RTE_LIBRTE_BITRATE
2267 bitrate_data = rte_stats_bitrate_create();
2268 if (bitrate_data == NULL)
2269 rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
2270 rte_stats_bitrate_reg(bitrate_data);
2274 #ifdef RTE_LIBRTE_CMDLINE
2275 if (interactive == 1) {
2277 printf("Start automatic packet forwarding\n");
2278 start_packet_forwarding(0);
2288 printf("No commandline core given, start packet forwarding\n");
2289 start_packet_forwarding(0);
2290 printf("Press enter to exit\n");
2291 rc = read(0, &c, 1);