4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
/*
 * NOTE(review): this file appears to be a lossy extraction of DPDK testpmd.c
 * (each line keeps its original line number; duplicate/short lines such as
 * braces, "continue;" and "#endif" were dropped). Code below is preserved
 * byte-for-byte; only review comments are added. Do not compile as-is.
 */
/* Run-mode globals: verbosity, interactive CLI, autostart, cmdline script. */
93 uint16_t verbose_level = 0; /**< Silent by default. */
95 /* use master core for command line ? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 char cmdline_filename[PATH_MAX] = {0};
101 * NUMA support configuration.
102 * When set, the NUMA support attempts to dispatch the allocation of the
103 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
104 * probed ports among the CPU sockets 0 and 1.
105 * Otherwise, all memory is allocated from CPU socket 0.
107 uint8_t numa_support = 1; /**< numa enabled by default */
110 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
113 uint8_t socket_num = UMA_NO_CONFIG;
116 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
/* Peer MAC table indexed by port id; filled by set_def_peer_eth_addrs(). */
121 * Record the Ethernet address of peer target ports to which packets are
123 * Must be instantiated with the ethernet addresses of peer traffic generator
126 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
127 portid_t nb_peer_eth_addrs = 0;
130 * Probed Target Environment.
132 struct rte_port *ports; /**< For all probed ethernet ports. */
133 portid_t nb_ports; /**< Number of probed ethernet ports. */
134 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
135 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
/* Invariants documented at original lines 139-140 below. */
138 * Test Forwarding Configuration.
139 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
140 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
142 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
143 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
144 portid_t nb_cfg_ports; /**< Number of configured ports. */
145 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
147 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
148 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
150 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
151 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
/* Table of available forwarding engines; entries between lines 156 and 165
 * are missing from this extract. cur_fwd_eng defaults to io forwarding. */
154 * Forwarding engines.
156 struct fwd_engine * fwd_engines[] = {
165 #ifdef RTE_LIBRTE_IEEE1588
166 &ieee1588_fwd_engine,
171 struct fwd_config cur_fwd_config;
172 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
173 uint32_t retry_enabled;
174 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
175 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
/* Mbuf sizing knobs; param_total_num_mbufs overrides the computed pool size. */
177 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
178 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
179 * specified on command-line. */
182 * Configuration of packet segments used by the "txonly" processing engine.
184 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
185 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
186 TXONLY_DEF_PACKET_LEN,
188 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
190 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
191 /**< Split policy for packets to TX. */
193 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
194 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
196 /* current configuration is in DCB or not,0 means it is not in DCB mode */
197 uint8_t dcb_config = 0;
199 /* Whether the dcb is in testing status */
200 uint8_t dcb_test = 0;
/* Per-port queue/descriptor counts and PMD ring thresholds.
 * RTE_PMD_PARAM_UNSET (-1) means "let the PMD use its own default". */
203 * Configurable number of RX/TX queues.
205 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
206 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
209 * Configurable number of RX/TX ring descriptors.
211 #define RTE_TEST_RX_DESC_DEFAULT 128
212 #define RTE_TEST_TX_DESC_DEFAULT 512
213 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
214 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
216 #define RTE_PMD_PARAM_UNSET -1
218 * Configurable values of RX and TX ring threshold registers.
221 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
223 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
225 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
230 * Configurable value of RX free threshold.
232 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
235 * Configurable value of RX drop enable.
237 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
240 * Configurable value of TX free threshold.
242 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
245 * Configurable value of TX RS bit threshold.
247 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
250 * Configurable value of TX queue flags.
252 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
255 * Receive Side Scaling (RSS) configuration.
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
260 * Port topology configuration
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
265 * Avoids to flush all the RX streams before starts forwarding.
267 uint8_t no_flush_rx = 0; /* flush by default */
270 * Avoids to check link status when starting/stopping a port.
272 uint8_t no_link_check = 0; /* check by default */
275 * Enable link status change notification
277 uint8_t lsc_interrupt = 1; /* enabled by default */
280 * Enable device removal notification.
282 uint8_t rmv_interrupt = 1; /* enabled by default */
/* Bit mask of ethdev events to print; one bit per rte_eth_event_type.
 * All listed events are enabled; VF_MBOX is intentionally absent. */
285 * Display or mask ether events
286 * Default to all events except VF_MBOX
288 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
289 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
290 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
291 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
292 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
293 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
/* Optional-feature state (bypass, latency stats) and default device config.
 * NOTE(review): matching #endif lines are missing from this extract. */
296 * NIC bypass mode configuration options.
298 #ifdef RTE_NIC_BYPASS
300 /* The NIC bypass watchdog timeout. */
301 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
305 #ifdef RTE_LIBRTE_LATENCY_STATS
308 * Set when latency stats is enabled in the commandline
310 uint8_t latencystats_enabled;
/* typo in original: "serive" -> "service" */
313 * Lcore ID to serive latency statistics.
315 lcoreid_t latencystats_lcore_id = -1;
/* Default RX mode: VLAN filter/strip and HW CRC strip on; everything else off. */
320 * Ethernet device configuration.
322 struct rte_eth_rxmode rx_mode = {
323 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
325 .header_split = 0, /**< Header Split disabled. */
326 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
327 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
328 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
329 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
330 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
331 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
/* Flow-director config: disabled mode, all-ones match masks (nested
 * initializer lines for .mask and the ipv4/ipv6 sub-structs are partially
 * missing from this extract). */
334 struct rte_fdir_conf fdir_conf = {
335 .mode = RTE_FDIR_MODE_NONE,
336 .pballoc = RTE_FDIR_PBALLOC_64K,
337 .status = RTE_FDIR_REPORT_STATUS,
339 .vlan_tci_mask = 0x0,
341 .src_ip = 0xFFFFFFFF,
342 .dst_ip = 0xFFFFFFFF,
345 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
346 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
348 .src_port_mask = 0xFFFF,
349 .dst_port_mask = 0xFFFF,
350 .mac_addr_byte_mask = 0xFF,
351 .tunnel_type_mask = 1,
352 .tunnel_id_mask = 0xFFFFFFFF,
357 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
/* Queue->stats-register mapping tables shared with config code. */
359 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
360 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
362 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
363 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
365 uint16_t nb_tx_queue_stats_mappings = 0;
366 uint16_t nb_rx_queue_stats_mappings = 0;
/* Highest socket id seen + 1; maintained by set_default_fwd_lcores_config(). */
368 unsigned max_socket = 0;
370 #ifdef RTE_LIBRTE_BITRATE
371 /* Bitrate statistics */
372 struct rte_stats_bitrates *bitrate_data;
373 lcoreid_t bitrate_lcore_id;
374 uint8_t bitrate_enabled;
/* Forward declarations for file-local helpers used before their definitions.
 * NOTE(review): eth_event_callback's remaining parameters are missing here. */
377 /* Forward function declarations */
378 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
379 static void check_all_ports_link_status(uint32_t port_mask);
380 static void eth_event_callback(uint8_t port_id,
381 enum rte_eth_event_type type,
385 * Check if all the ports are started.
386 * If yes, return positive value. If not, return zero.
388 static int all_ports_started(void);
/*
 * Build the default forwarding-lcore configuration: scan all lcore ids,
 * update the global max_socket (aborting if it would exceed
 * RTE_MAX_NUMA_NODES), and record every enabled, non-master lcore into
 * fwd_lcores_cpuids[]; finally publish the count via nb_lcores/nb_cfg_lcores.
 * NOTE(review): interior lines (braces, "continue;" statements) are missing
 * from this extract.
 */
391 * Setup default configuration.
394 set_default_fwd_lcores_config(void)
398 unsigned int sock_num;
401 for (i = 0; i < RTE_MAX_LCORE; i++) {
/* sock_num is socket id + 1 so it doubles as a 1-based socket count. */
402 sock_num = rte_lcore_to_socket_id(i) + 1;
403 if (sock_num > max_socket) {
404 if (sock_num > RTE_MAX_NUMA_NODES)
405 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
406 max_socket = sock_num;
408 if (!rte_lcore_is_enabled(i))
410 if (i == rte_get_master_lcore())
412 fwd_lcores_cpuids[nb_lc++] = i;
414 nb_lcores = (lcoreid_t) nb_lc;
415 nb_cfg_lcores = nb_lcores;
/*
 * Fill peer_eth_addrs[] with default locally-administered MACs:
 * byte 0 = ETHER_LOCAL_ADMIN_ADDR, byte 5 = port index (other bytes
 * presumably zero — their initialization is not visible in this extract).
 */
420 set_def_peer_eth_addrs(void)
424 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
425 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
426 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default port configuration: forward on every probed port, identity-mapped
 * (fwd_ports_ids[i] == i), with configured == forwarding == probed count.
 */
431 set_default_fwd_ports_config(void)
435 for (pt_id = 0; pt_id < nb_ports; pt_id++)
436 fwd_ports_ids[pt_id] = pt_id;
438 nb_cfg_ports = nb_ports;
439 nb_fwd_ports = nb_ports;
/* Reset lcores, peer addresses and ports to their defaults, in that order. */
443 set_def_fwd_config(void)
445 set_default_fwd_lcores_config();
446 set_def_peer_eth_addrs();
447 set_default_fwd_ports_config();
/*
 * Create the mbuf pool for one socket.
 * Element size = sizeof(struct rte_mbuf) + mbuf_seg_size; the pool name
 * encodes the socket id via mbuf_poolname_build(). Allocation strategy:
 *   1. XEN grant-alloc pool when RTE_LIBRTE_PMD_XENVIRT is set;
 *   2. otherwise (or on XEN failure) an anonymous-memory-backed empty pool
 *      populated via rte_mempool_populate_anon() — the "mp-anon" path
 *      (the enabling condition line is missing from this extract);
 *   3. otherwise plain rte_pktmbuf_pool_create().
 * Exits the process on failure; dumps the pool when verbose.
 */
451 * Configuration initialisation done once at init time.
454 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
455 unsigned int socket_id)
457 char pool_name[RTE_MEMPOOL_NAMESIZE];
458 struct rte_mempool *rte_mp = NULL;
461 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
462 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
465 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
466 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
468 #ifdef RTE_LIBRTE_PMD_XENVIRT
469 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
470 (unsigned) mb_mempool_cache,
471 sizeof(struct rte_pktmbuf_pool_private),
472 rte_pktmbuf_pool_init, NULL,
473 rte_pktmbuf_init, NULL,
477 /* if the former XEN allocation failed fall back to normal allocation */
478 if (rte_mp == NULL) {
480 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
481 mb_size, (unsigned) mb_mempool_cache,
482 sizeof(struct rte_pktmbuf_pool_private),
/* Populate from anonymous mappings; 0 objects populated == failure,
 * so free the pool and (presumably) bail out — error path lines missing. */
487 if (rte_mempool_populate_anon(rte_mp) == 0) {
488 rte_mempool_free(rte_mp);
492 rte_pktmbuf_pool_init(rte_mp, NULL);
493 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
495 /* wrapper to rte_mempool_create() */
496 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
497 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
502 if (rte_mp == NULL) {
503 rte_exit(EXIT_FAILURE,
504 "Creation of mbuf pool for socket %u failed: %s\n",
505 socket_id, rte_strerror(rte_errno));
506 } else if (verbose_level > 0) {
507 rte_mempool_dump(stdout, rte_mp);
/*
 * Validate a socket id against the discovered max_socket: 0 if valid,
 * -1 otherwise. Prints a one-time NUMA-configuration warning (gated by the
 * static warning_once flag) when NUMA support is on.
 * NOTE(review): the warning text and return statements are truncated in
 * this extract.
 */
512 * Check given socket id is valid or not with NUMA mode,
513 * if valid, return 0, else return -1
516 check_socket_id(const unsigned int socket_id)
518 static int warning_once = 0;
520 if (socket_id >= max_socket) {
521 if (!warning_once && numa_support)
522 printf("Warning: NUMA should be configured manually by"
523 " using --port-numa-config and"
524 " --ring-numa-config parameters along with"
/*
 * One-time testpmd configuration (the defining line of this function is
 * missing from the extract; by its locals and callees this is init_config):
 *  - allocate per-lcore fwd_lcore contexts;
 *  - per probed port, fetch dev_info, tally ports per NUMA socket, and mark
 *    the port for (re)configuration;
 *  - size and create the mbuf pool(s) — one per socket under NUMA, a single
 *    socket-0 (or --socket-num) pool otherwise;
 *  - bind each forwarding lcore to the pool of its socket;
 *  - build the forwarding streams.
 * Exits the process on any allocation failure.
 */
536 struct rte_port *port;
537 struct rte_mempool *mbp;
538 unsigned int nb_mbuf_per_pool;
540 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
542 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
/* Reset all per-port/per-ring NUMA placement overrides to "unset". */
545 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
546 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
547 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
550 /* Configuration of logical cores. */
551 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
552 sizeof(struct fwd_lcore *) * nb_lcores,
553 RTE_CACHE_LINE_SIZE);
554 if (fwd_lcores == NULL) {
555 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
556 "failed\n", nb_lcores);
558 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
559 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
560 sizeof(struct fwd_lcore),
561 RTE_CACHE_LINE_SIZE);
562 if (fwd_lcores[lc_id] == NULL) {
563 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
566 fwd_lcores[lc_id]->cpuid_idx = lc_id;
569 RTE_ETH_FOREACH_DEV(pid) {
571 rte_eth_dev_info_get(pid, &port->dev_info);
/* Prefer explicit --port-numa-config; else use the device's own socket. */
574 if (port_numa[pid] != NUMA_NO_CONFIG)
575 port_per_socket[port_numa[pid]]++;
577 uint32_t socket_id = rte_eth_dev_socket_id(pid);
579 /* if socket_id is invalid, set to 0 */
580 if (check_socket_id(socket_id) < 0)
582 port_per_socket[socket_id]++;
586 /* set flag to initialize port/queue */
587 port->need_reconfig = 1;
588 port->need_reconfig_queues = 1;
592 * Create pools of mbuf.
593 * If NUMA support is disabled, create a single pool of mbuf in
594 * socket 0 memory by default.
595 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
597 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
598 * nb_txd can be configured at run time.
600 if (param_total_num_mbufs)
601 nb_mbuf_per_pool = param_total_num_mbufs;
/* Worst-case sizing: max RX + max TX descriptors, per-lcore cache, one
 * burst in flight — multiplied across all possible ports. */
603 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
604 (nb_lcores * mb_mempool_cache) +
605 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
606 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
612 for (i = 0; i < max_socket; i++)
613 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
615 if (socket_num == UMA_NO_CONFIG)
616 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
618 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
625 * Records which Mbuf pool to use by each logical core, if needed.
627 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
628 mbp = mbuf_pool_find(
629 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
/* Fallback: socket-0 pool when the lcore's socket has no pool. */
632 mbp = mbuf_pool_find(0);
633 fwd_lcores[lc_id]->mbp = mbp;
636 /* Configuration of packet forwarding streams. */
637 if (init_fwd_streams() < 0)
638 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * Re-initialize a single (e.g. newly attached) port: refresh dev_info,
 * flag it for full port+queue reconfiguration, and pin its socket id.
 */
645 reconfig(portid_t new_port_id, unsigned socket_id)
647 struct rte_port *port;
649 /* Reconfiguration of Ethernet ports. */
650 port = &ports[new_port_id];
651 rte_eth_dev_info_get(new_port_id, &port->dev_info);
653 /* set flag to initialize port/queue */
654 port->need_reconfig = 1;
655 port->need_reconfig_queues = 1;
656 port->socket_id = socket_id;
/*
 * (Re)build the forwarding stream array.
 * Per port: reject nb_rxq/nb_txq above the device maxima, then resolve the
 * port's socket id (explicit --port-numa-config beats the device socket;
 * under UMA the --socket-num value, or 0, is used).
 * Then allocate nb_ports * max(nb_rxq, nb_txq) zeroed fwd_stream objects,
 * freeing any previous array first; a no-op if the count is unchanged.
 * Returns 0 on success, negative on validation failure (return statements
 * are missing from this extract); exits on allocation failure.
 */
663 init_fwd_streams(void)
666 struct rte_port *port;
667 streamid_t sm_id, nb_fwd_streams_new;
670 /* set socket id according to numa or not */
671 RTE_ETH_FOREACH_DEV(pid) {
673 if (nb_rxq > port->dev_info.max_rx_queues) {
674 printf("Fail: nb_rxq(%d) is greater than "
675 "max_rx_queues(%d)\n", nb_rxq,
676 port->dev_info.max_rx_queues);
679 if (nb_txq > port->dev_info.max_tx_queues) {
680 printf("Fail: nb_txq(%d) is greater than "
681 "max_tx_queues(%d)\n", nb_txq,
682 port->dev_info.max_tx_queues);
686 if (port_numa[pid] != NUMA_NO_CONFIG)
687 port->socket_id = port_numa[pid];
689 port->socket_id = rte_eth_dev_socket_id(pid);
691 /* if socket_id is invalid, set to 0 */
692 if (check_socket_id(port->socket_id) < 0)
697 if (socket_num == UMA_NO_CONFIG)
700 port->socket_id = socket_num;
704 q = RTE_MAX(nb_rxq, nb_txq);
706 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
709 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
710 if (nb_fwd_streams_new == nb_fwd_streams)
/* Stream count changed: release the old array before reallocating. */
713 if (fwd_streams != NULL) {
714 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
715 if (fwd_streams[sm_id] == NULL)
717 rte_free(fwd_streams[sm_id]);
718 fwd_streams[sm_id] = NULL;
720 rte_free(fwd_streams);
725 nb_fwd_streams = nb_fwd_streams_new;
726 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
727 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
728 if (fwd_streams == NULL)
729 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
730 "failed\n", nb_fwd_streams);
732 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
733 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
734 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
735 if (fwd_streams[sm_id] == NULL)
736 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
/*
 * Print a burst-size histogram summary for one direction ("RX"/"TX"):
 * total bursts, plus the top one or two burst sizes as percentages, with a
 * catch-all "others" bucket. Compiled only with
 * RTE_TEST_PMD_RECORD_BURST_STATS.
 * NOTE(review): the branch maintaining burst_stats[1]/[2] for the
 * second-highest count is partially missing from this extract.
 */
743 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
745 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
747 unsigned int total_burst;
748 unsigned int nb_burst;
749 unsigned int burst_stats[3];
750 uint16_t pktnb_stats[3];
752 int burst_percent[3];
755 * First compute the total number of packet bursts and the
756 * two highest numbers of bursts of the same number of packets.
759 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
760 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
761 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
762 nb_burst = pbs->pkt_burst_spread[nb_pkt];
765 total_burst += nb_burst;
/* New maximum: demote the previous best to slot 1. */
766 if (nb_burst > burst_stats[0]) {
767 burst_stats[1] = burst_stats[0];
768 pktnb_stats[1] = pktnb_stats[0];
769 burst_stats[0] = nb_burst;
770 pktnb_stats[0] = nb_pkt;
773 if (total_burst == 0)
775 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
776 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
777 burst_percent[0], (int) pktnb_stats[0]);
778 if (burst_stats[0] == total_burst) {
782 if (burst_stats[0] + burst_stats[1] == total_burst) {
783 printf(" + %d%% of %d pkts]\n",
784 100 - burst_percent[0], pktnb_stats[1]);
787 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
788 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
789 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
790 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
793 printf(" + %d%% of %d pkts + %d%% of others]\n",
794 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
796 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print per-port forwarding statistics. Two layouts: a left-aligned one
 * when no queue->stats-register mappings are enabled, a right-aligned one
 * otherwise (followed by per-stats-register RX/TX counters). Checksum-engine
 * bad-csum counters and RX error/nombuf lines are printed conditionally.
 */
799 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
801 struct rte_port *port;
804 static const char *fwd_stats_border = "----------------------";
806 port = &ports[port_id];
807 printf("\n %s Forward statistics for port %-2d %s\n",
808 fwd_stats_border, port_id, fwd_stats_border);
/* Layout 1: no queue stats mapping on either direction. */
810 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
811 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
813 stats->ipackets, stats->imissed,
814 (uint64_t) (stats->ipackets + stats->imissed));
816 if (cur_fwd_eng == &csum_fwd_engine)
817 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
818 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
819 if ((stats->ierrors + stats->rx_nombuf) > 0) {
820 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
821 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
824 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
826 stats->opackets, port->tx_dropped,
827 (uint64_t) (stats->opackets + port->tx_dropped));
/* Layout 2: queue stats mapping enabled — right-aligned columns. */
830 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
832 stats->ipackets, stats->imissed,
833 (uint64_t) (stats->ipackets + stats->imissed));
835 if (cur_fwd_eng == &csum_fwd_engine)
836 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
837 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
838 if ((stats->ierrors + stats->rx_nombuf) > 0) {
839 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
840 printf(" RX-nombufs: %14"PRIu64"\n",
844 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
846 stats->opackets, port->tx_dropped,
847 (uint64_t) (stats->opackets + port->tx_dropped));
850 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
852 pkt_burst_stats_display("RX",
853 &port->rx_stream->rx_burst_stats);
855 pkt_burst_stats_display("TX",
856 &port->tx_stream->tx_burst_stats);
/* Per-stats-register dumps when queue mapping is enabled. */
859 if (port->rx_queue_stats_mapping_enabled) {
861 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
862 printf(" Stats reg %2d RX-packets:%14"PRIu64
863 " RX-errors:%14"PRIu64
864 " RX-bytes:%14"PRIu64"\n",
865 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
869 if (port->tx_queue_stats_mapping_enabled) {
870 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
871 printf(" Stats reg %2d TX-packets:%14"PRIu64
872 " TX-bytes:%14"PRIu64"\n",
873 i, stats->q_opackets[i], stats->q_obytes[i]);
877 printf(" %s--------------------------------%s\n",
878 fwd_stats_border, fwd_stats_border);
/*
 * Print per-stream statistics (RX/TX packet counts, drops, and bad-checksum
 * counters under the csum engine). Silently skips streams with no activity.
 */
882 fwd_stream_stats_display(streamid_t stream_id)
884 struct fwd_stream *fs;
885 static const char *fwd_top_stats_border = "-------";
887 fs = fwd_streams[stream_id];
/* Nothing to report for an idle stream (early return line missing here). */
888 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
889 (fs->fwd_dropped == 0))
891 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
892 "TX Port=%2d/Queue=%2d %s\n",
893 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
894 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
895 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
896 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
898 /* if checksum mode */
899 if (cur_fwd_eng == &csum_fwd_engine) {
900 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
901 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
904 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
905 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
906 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain stale packets from every forwarding port's RX queues before a run:
 * two passes over (port, rxq), each polling rte_eth_rx_burst() and freeing
 * the mbufs until the queue is empty or a 1-second TSC timeout expires
 * (guards against a NIC that never stops returning packets), with a 10 ms
 * pause between the two passes.
 */
911 flush_fwd_rx_queues(void)
913 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
920 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
921 uint64_t timer_period;
923 /* convert to number of cycles */
924 timer_period = rte_get_timer_hz(); /* 1 second timeout */
926 for (j = 0; j < 2; j++) {
927 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
928 for (rxq = 0; rxq < nb_rxq; rxq++) {
929 port_id = fwd_ports_ids[rxp];
931 * testpmd can stuck in the below do while loop
932 * if rte_eth_rx_burst() always returns nonzero
933 * packets. So timer is added to exit this loop
934 * after 1sec timer expiry.
936 prev_tsc = rte_rdtsc();
938 nb_rx = rte_eth_rx_burst(port_id, rxq,
939 pkts_burst, MAX_PKT_BURST);
940 for (i = 0; i < nb_rx; i++)
941 rte_pktmbuf_free(pkts_burst[i]);
943 cur_tsc = rte_rdtsc();
944 diff_tsc = cur_tsc - prev_tsc;
945 timer_tsc += diff_tsc;
946 } while ((nb_rx > 0) &&
947 (timer_tsc < timer_period));
951 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main per-lcore forwarding loop: repeatedly invoke pkt_fwd on each stream
 * assigned to this lcore until fc->stopped is set (by the controlling core
 * in stop_packet_forwarding / run_one_txonly_burst_on_core).
 * Optional per-iteration hooks: once-per-second bitrate recalculation
 * (RTE_LIBRTE_BITRATE) and latency-stats update (RTE_LIBRTE_LATENCY_STATS),
 * each only on its designated lcore.
 */
956 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
958 struct fwd_stream **fsm;
961 #ifdef RTE_LIBRTE_BITRATE
962 uint64_t tics_per_1sec;
964 uint64_t tics_current;
965 uint8_t idx_port, cnt_ports;
967 cnt_ports = rte_eth_dev_count();
968 tics_datum = rte_rdtsc();
969 tics_per_1sec = rte_get_timer_hz();
/* This lcore's contiguous slice of the global stream array. */
971 fsm = &fwd_streams[fc->stream_idx];
972 nb_fs = fc->stream_nb;
974 for (sm_id = 0; sm_id < nb_fs; sm_id++)
975 (*pkt_fwd)(fsm[sm_id]);
976 #ifdef RTE_LIBRTE_BITRATE
977 if (bitrate_enabled != 0 &&
978 bitrate_lcore_id == rte_lcore_id()) {
979 tics_current = rte_rdtsc();
980 if (tics_current - tics_datum >= tics_per_1sec) {
981 /* Periodic bitrate calculation */
983 idx_port < cnt_ports;
985 rte_stats_bitrate_calc(bitrate_data,
987 tics_datum = tics_current;
991 #ifdef RTE_LIBRTE_LATENCY_STATS
992 if (latencystats_enabled != 0 &&
993 latencystats_lcore_id == rte_lcore_id())
994 rte_latencystats_update();
997 } while (! fc->stopped);
/*
 * lcore entry point (lcore_function_t): run the currently configured
 * forwarding engine's packet_fwd loop on this lcore's stream slice.
 */
1001 start_pkt_forward_on_core(void *fwd_arg)
1003 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1004 cur_fwd_config.fwd_eng->packet_fwd);
/*
 * lcore entry point: send exactly one TXONLY burst by running the loop on a
 * stack copy of the fwd_lcore whose stopped flag is pre-set, so the do/while
 * in run_pkt_fwd_on_lcore executes a single iteration. The copy leaves the
 * real fwd_lcore state untouched for the subsequent forwarding run.
 */
1009 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1010 * Used to start communication flows in network loopback test configurations.
1013 run_one_txonly_burst_on_core(void *fwd_arg)
1015 struct fwd_lcore *fwd_lc;
1016 struct fwd_lcore tmp_lcore;
1018 fwd_lc = (struct fwd_lcore *) fwd_arg;
1019 tmp_lcore = *fwd_lc;
1020 tmp_lcore.stopped = 1;
1021 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
/*
 * Launch the given lcore function on every configured forwarding lcore,
 * after running the engine's optional per-port begin callback. In
 * interactive mode the calling lcore (the CLI core) is skipped so the
 * command line stays responsive; launch failures are reported, not fatal.
 */
1026 * Launch packet forwarding:
1027 * - Setup per-port forwarding context.
1028 * - launch logical cores with their forwarding configuration.
1031 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1033 port_fwd_begin_t port_fwd_begin;
1038 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1039 if (port_fwd_begin != NULL) {
1040 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1041 (*port_fwd_begin)(fwd_ports_ids[i]);
1043 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1044 lc_id = fwd_lcores_cpuids[i];
1045 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1046 fwd_lcores[i]->stopped = 0;
1047 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1048 fwd_lcores[i], lc_id);
1050 printf("launch lcore %u failed - diag=%d\n",
/*
 * Start a forwarding run:
 *  - validate that the selected engine has the queues it needs (rxonly needs
 *    RX queues, txonly needs TX queues, every other engine needs both);
 *  - refuse to start unless all ports are started and no run is in progress;
 *  - rebuild streams, enforce DCB constraints when dcb_config is set
 *    (all ports in DCB mode, more than one forwarding core);
 *  - flush stale RX packets (unless no_flush_rx — the guard line is missing
 *    from this extract), snapshot baseline port stats, zero per-stream
 *    counters;
 *  - if with_tx_first: run N single TXONLY bursts first (with the txonly
 *    engine's begin/end callbacks), then launch the configured engine.
 */
1057 * Launch packet forwarding configuration.
1060 start_packet_forwarding(int with_tx_first)
1062 port_fwd_begin_t port_fwd_begin;
1063 port_fwd_end_t port_fwd_end;
1064 struct rte_port *port;
1069 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1070 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1072 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1073 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1075 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1076 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1077 (!nb_rxq || !nb_txq))
1078 rte_exit(EXIT_FAILURE,
1079 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1080 cur_fwd_eng->fwd_mode_name);
1082 if (all_ports_started() == 0) {
1083 printf("Not all ports were started\n");
1086 if (test_done == 0) {
1087 printf("Packet forwarding already started\n");
1091 if (init_fwd_streams() < 0) {
1092 printf("Fail from init_fwd_streams()\n");
/* DCB run constraints (the enclosing dcb_config check is missing here). */
1097 for (i = 0; i < nb_fwd_ports; i++) {
1098 pt_id = fwd_ports_ids[i];
1099 port = &ports[pt_id];
1100 if (!port->dcb_flag) {
1101 printf("In DCB mode, all forwarding ports must "
1102 "be configured in this mode.\n");
1106 if (nb_fwd_lcores == 1) {
1107 printf("In DCB mode,the nb forwarding cores "
1108 "should be larger than 1.\n");
1115 flush_fwd_rx_queues();
1118 pkt_fwd_config_display(&cur_fwd_config);
1119 rxtx_config_display();
/* Snapshot baseline stats so the end-of-run report shows deltas only. */
1121 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1122 pt_id = fwd_ports_ids[i];
1123 port = &ports[pt_id];
1124 rte_eth_stats_get(pt_id, &port->stats);
1125 port->tx_dropped = 0;
1127 map_port_queue_stats_mapping_registers(pt_id, port);
1129 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1130 fwd_streams[sm_id]->rx_packets = 0;
1131 fwd_streams[sm_id]->tx_packets = 0;
1132 fwd_streams[sm_id]->fwd_dropped = 0;
1133 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1134 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1136 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1137 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1138 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1139 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1140 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1142 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1143 fwd_streams[sm_id]->core_cycles = 0;
1146 if (with_tx_first) {
1147 port_fwd_begin = tx_only_engine.port_fwd_begin;
1148 if (port_fwd_begin != NULL) {
1149 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1150 (*port_fwd_begin)(fwd_ports_ids[i]);
/* One synchronous TXONLY burst per requested repetition. */
1152 while (with_tx_first--) {
1153 launch_packet_forwarding(
1154 run_one_txonly_burst_on_core);
1155 rte_eal_mp_wait_lcore();
1157 port_fwd_end = tx_only_engine.port_fwd_end;
1158 if (port_fwd_end != NULL) {
1159 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1160 (*port_fwd_end)(fwd_ports_ids[i]);
1163 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop a forwarding run and print the final report:
 *  - set every forwarding lcore's stopped flag and wait for all lcores;
 *  - run the engine's optional per-port end callback;
 *  - fold per-stream counters (drops, bad csums, optional core cycles) back
 *    into their ports, printing per-stream stats when there are more
 *    streams than ports;
 *  - per port, subtract the baseline snapshot taken at start so only this
 *    run's deltas are reported, accumulate grand totals, and display;
 *  - finally print accumulated totals and (optionally) CPU cycles/packet.
 */
1167 stop_packet_forwarding(void)
1169 struct rte_eth_stats stats;
1170 struct rte_port *port;
1171 port_fwd_end_t port_fwd_end;
1176 uint64_t total_recv;
1177 uint64_t total_xmit;
1178 uint64_t total_rx_dropped;
1179 uint64_t total_tx_dropped;
1180 uint64_t total_rx_nombuf;
1181 uint64_t tx_dropped;
1182 uint64_t rx_bad_ip_csum;
1183 uint64_t rx_bad_l4_csum;
1184 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1185 uint64_t fwd_cycles;
1187 static const char *acc_stats_border = "+++++++++++++++";
/* Guard against stop-without-start (test_done check line missing here). */
1190 printf("Packet forwarding not started\n");
1193 printf("Telling cores to stop...");
1194 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1195 fwd_lcores[lc_id]->stopped = 1;
1196 printf("\nWaiting for lcores to finish...\n");
1197 rte_eal_mp_wait_lcore();
1198 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1199 if (port_fwd_end != NULL) {
1200 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1201 pt_id = fwd_ports_ids[i];
1202 (*port_fwd_end)(pt_id);
1205 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Aggregate per-stream counters into their owning ports. */
1208 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1209 if (cur_fwd_config.nb_fwd_streams >
1210 cur_fwd_config.nb_fwd_ports) {
1211 fwd_stream_stats_display(sm_id);
1212 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1213 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1215 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1217 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1220 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1221 tx_dropped = (uint64_t) (tx_dropped +
1222 fwd_streams[sm_id]->fwd_dropped);
1223 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1226 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1227 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1228 fwd_streams[sm_id]->rx_bad_ip_csum);
1229 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1233 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1234 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1235 fwd_streams[sm_id]->rx_bad_l4_csum);
1236 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1239 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1240 fwd_cycles = (uint64_t) (fwd_cycles +
1241 fwd_streams[sm_id]->core_cycles);
1246 total_rx_dropped = 0;
1247 total_tx_dropped = 0;
1248 total_rx_nombuf = 0;
1249 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1250 pt_id = fwd_ports_ids[i];
1252 port = &ports[pt_id];
1253 rte_eth_stats_get(pt_id, &stats);
/* Convert absolute HW counters to this run's deltas by subtracting the
 * start-of-run snapshot, then clear the snapshot. */
1254 stats.ipackets -= port->stats.ipackets;
1255 port->stats.ipackets = 0;
1256 stats.opackets -= port->stats.opackets;
1257 port->stats.opackets = 0;
1258 stats.ibytes -= port->stats.ibytes;
1259 port->stats.ibytes = 0;
1260 stats.obytes -= port->stats.obytes;
1261 port->stats.obytes = 0;
1262 stats.imissed -= port->stats.imissed;
1263 port->stats.imissed = 0;
1264 stats.oerrors -= port->stats.oerrors;
1265 port->stats.oerrors = 0;
1266 stats.rx_nombuf -= port->stats.rx_nombuf;
1267 port->stats.rx_nombuf = 0;
1269 total_recv += stats.ipackets;
1270 total_xmit += stats.opackets;
1271 total_rx_dropped += stats.imissed;
1272 total_tx_dropped += port->tx_dropped;
1273 total_rx_nombuf += stats.rx_nombuf;
1275 fwd_port_stats_display(pt_id, &stats);
1277 printf("\n %s Accumulated forward statistics for all ports"
1279 acc_stats_border, acc_stats_border);
1280 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1282 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1284 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1285 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1286 if (total_rx_nombuf > 0)
1287 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1288 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1290 acc_stats_border, acc_stats_border);
1291 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* NOTE(review): a total_recv > 0 guard presumably precedes this division
 * in the full source — the guard line is missing from this extract. */
1293 printf("\n CPU cycles/packet=%u (total cycles="
1294 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1295 (unsigned int)(fwd_cycles / total_recv),
1296 fwd_cycles, total_recv);
1298 printf("\nDone.\n");
1303 dev_set_link_up(portid_t pid)
1305 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1306 printf("\nSet link up fail.\n");
1310 dev_set_link_down(portid_t pid)
1312 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1313 printf("\nSet link down fail.\n");
1317 all_ports_started(void)
1320 struct rte_port *port;
1322 RTE_ETH_FOREACH_DEV(pi) {
1324 /* Check if there is a port which is not started */
1325 if ((port->port_status != RTE_PORT_STARTED) &&
1326 (port->slave_flag == 0))
1330 /* No port is not started */
1335 all_ports_stopped(void)
1338 struct rte_port *port;
1340 RTE_ETH_FOREACH_DEV(pi) {
1342 if ((port->port_status != RTE_PORT_STOPPED) &&
1343 (port->slave_flag == 0))
1351 port_is_started(portid_t port_id)
1353 if (port_id_is_invalid(port_id, ENABLED_WARN))
1356 if (ports[port_id].port_status != RTE_PORT_STARTED)
1363 port_is_closed(portid_t port_id)
1365 if (port_id_is_invalid(port_id, ENABLED_WARN))
1368 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * start_port(): configure (if needed) and start port @pid, or every port
 * when @pid is RTE_PORT_ALL.  Ports transition STOPPED -> HANDLING ->
 * STARTED via rte_atomic16_cmpset(); on any configuration failure the
 * state is rolled back to STOPPED and the reconfig flags are re-armed so
 * the next start attempt retries.
 * NOTE(review): several original lines (return type, braces, returns,
 * continue statements) are missing from this extract; only comments
 * were added here.
 */
1375 start_port(portid_t pid)
/* -1: no port matched; 0: a matched port was not stopped; 1: started one */
1377 int diag, need_check_link_status = -1;
1380 struct rte_port *port;
1381 struct ether_addr mac_addr;
1382 enum rte_eth_event_type event_type;
1384 if (port_id_is_invalid(pid, ENABLED_WARN))
1389 RTE_ETH_FOREACH_DEV(pi) {
/* Skip ports not selected by @pid. */
1390 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1393 need_check_link_status = 0;
/* Claim the port: only a STOPPED port may be (re)started. */
1395 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1396 RTE_PORT_HANDLING) == 0) {
1397 printf("Port %d is now not stopped\n", pi);
/* Device-level reconfiguration was requested since the last start. */
1401 if (port->need_reconfig > 0) {
1402 port->need_reconfig = 0;
1404 printf("Configuring Port %d (socket %u)\n", pi,
1406 /* configure port */
1407 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* On failure: roll the state back to STOPPED and retry next start. */
1410 if (rte_atomic16_cmpset(&(port->port_status),
1411 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1412 printf("Port %d can not be set back "
1413 "to stopped\n", pi);
1414 printf("Fail to configure port %d\n", pi);
1415 /* try to reconfigure port next time */
1416 port->need_reconfig = 1;
/* Queue-level reconfiguration: re-create all TX then all RX queues. */
1420 if (port->need_reconfig_queues > 0) {
1421 port->need_reconfig_queues = 0;
1422 /* setup tx queues */
1423 for (qi = 0; qi < nb_txq; qi++) {
/* Prefer the user-forced TX ring NUMA socket over the port's own. */
1424 if ((numa_support) &&
1425 (txring_numa[pi] != NUMA_NO_CONFIG))
1426 diag = rte_eth_tx_queue_setup(pi, qi,
1427 nb_txd,txring_numa[pi],
1430 diag = rte_eth_tx_queue_setup(pi, qi,
1431 nb_txd,port->socket_id,
1437 /* Fail to setup tx queue, return */
1438 if (rte_atomic16_cmpset(&(port->port_status),
1440 RTE_PORT_STOPPED) == 0)
1441 printf("Port %d can not be set back "
1442 "to stopped\n", pi);
1443 printf("Fail to configure port %d tx queues\n", pi);
1444 /* try to reconfigure queues next time */
1445 port->need_reconfig_queues = 1;
1448 /* setup rx queues */
1449 for (qi = 0; qi < nb_rxq; qi++) {
/* RX queues also need an mbuf pool on the right NUMA socket. */
1450 if ((numa_support) &&
1451 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1452 struct rte_mempool * mp =
1453 mbuf_pool_find(rxring_numa[pi]);
1455 printf("Failed to setup RX queue:"
1456 "No mempool allocation"
1457 " on the socket %d\n",
1462 diag = rte_eth_rx_queue_setup(pi, qi,
1463 nb_rxd,rxring_numa[pi],
1464 &(port->rx_conf),mp);
/* Otherwise use the mbuf pool of the port's own socket. */
1466 struct rte_mempool *mp =
1467 mbuf_pool_find(port->socket_id);
1469 printf("Failed to setup RX queue:"
1470 "No mempool allocation"
1471 " on the socket %d\n",
1475 diag = rte_eth_rx_queue_setup(pi, qi,
1476 nb_rxd,port->socket_id,
1477 &(port->rx_conf), mp);
1482 /* Fail to setup rx queue, return */
1483 if (rte_atomic16_cmpset(&(port->port_status),
1485 RTE_PORT_STOPPED) == 0)
1486 printf("Port %d can not be set back "
1487 "to stopped\n", pi);
1488 printf("Fail to configure port %d rx queues\n", pi);
1489 /* try to reconfigure queues next time */
1490 port->need_reconfig_queues = 1;
/* Register eth_event_callback() for every ethdev event type. */
1495 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1496 event_type < RTE_ETH_EVENT_MAX;
1498 diag = rte_eth_dev_callback_register(pi,
1503 printf("Failed to setup even callback for event %d\n",
/* Finally start the device; roll back to STOPPED on failure. */
1510 if (rte_eth_dev_start(pi) < 0) {
1511 printf("Fail to start port %d\n", pi);
1513 /* Fail to setup rx queue, return */
1514 if (rte_atomic16_cmpset(&(port->port_status),
1515 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1516 printf("Port %d can not be set back to "
/* Success: publish the STARTED state. */
1521 if (rte_atomic16_cmpset(&(port->port_status),
1522 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1523 printf("Port %d can not be set into started\n", pi);
/* Show the port MAC address so the link peer can be configured. */
1525 rte_eth_macaddr_get(pi, &mac_addr);
1526 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1527 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1528 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1529 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1531 /* at least one port started, need checking link status */
1532 need_check_link_status = 1;
1535 if (need_check_link_status == 1 && !no_link_check)
1536 check_all_ports_link_status(RTE_PORT_ALL);
1537 else if (need_check_link_status == 0)
1538 printf("Please stop the ports first\n");
/*
 * stop_port(): stop port @pid, or every port when @pid is RTE_PORT_ALL.
 * A port is skipped when it is still part of an active forwarding
 * configuration, is a bonding slave, or is not currently STARTED.
 */
1545 stop_port(portid_t pid)
1548 struct rte_port *port;
1549 int need_check_link_status = 0;
1556 if (port_id_is_invalid(pid, ENABLED_WARN))
1559 printf("Stopping ports...\n");
1561 RTE_ETH_FOREACH_DEV(pi) {
1562 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
/* Refuse while the port is still used by an active forwarding run. */
1565 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1566 printf("Please remove port %d from forwarding configuration.\n", pi);
/* Slaves are stopped through their bonding master, not directly. */
1570 if (port_is_bonding_slave(pi)) {
1571 printf("Please remove port %d from bonded device.\n", pi);
/* Only a STARTED port can be stopped; claim it atomically. */
1576 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1577 RTE_PORT_HANDLING) == 0)
1580 rte_eth_dev_stop(pi);
1582 if (rte_atomic16_cmpset(&(port->port_status),
1583 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1584 printf("Port %d can not be set into stopped\n", pi);
1585 need_check_link_status = 1;
/* Report final link state unless link checking was disabled. */
1587 if (need_check_link_status && !no_link_check)
1588 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * close_port(): close port @pid, or every port when @pid is RTE_PORT_ALL.
 * The port must be STOPPED first; any rte_flow rules are flushed before
 * rte_eth_dev_close() releases the device resources.
 */
1594 close_port(portid_t pid)
1597 struct rte_port *port;
1599 if (port_id_is_invalid(pid, ENABLED_WARN))
1602 printf("Closing ports...\n");
1604 RTE_ETH_FOREACH_DEV(pi) {
1605 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
/* Refuse while the port is still used by an active forwarding run. */
1608 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1609 printf("Please remove port %d from forwarding configuration.\n", pi);
/* Slaves are closed through their bonding master, not directly. */
1613 if (port_is_bonding_slave(pi)) {
1614 printf("Please remove port %d from bonded device.\n", pi);
/* CLOSED -> CLOSED compare-and-set detects an already-closed port. */
1619 if (rte_atomic16_cmpset(&(port->port_status),
1620 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1621 printf("Port %d is already closed\n", pi);
/* Only a STOPPED port may be closed; claim it atomically. */
1625 if (rte_atomic16_cmpset(&(port->port_status),
1626 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1627 printf("Port %d is now not stopped\n", pi);
/* Drop any flow rules still attached to the port before closing. */
1631 if (port->flow_list)
1632 port_flow_flush(pi);
1633 rte_eth_dev_close(pi);
1635 if (rte_atomic16_cmpset(&(port->port_status),
1636 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1637 printf("Port %d cannot be set to closed\n", pi);
/*
 * attach_port(): hot-plug a new device described by @identifier (a PCI
 * address or virtual-device string), configure it for the NUMA socket it
 * sits on, enable promiscuous mode and leave it in STOPPED state.
 */
1644 attach_port(char *identifier)
1647 unsigned int socket_id;
1649 printf("Attaching a new port...\n");
1651 if (identifier == NULL) {
1652 printf("Invalid parameters are specified\n");
1656 if (rte_eth_dev_attach(identifier, &pi))
1659 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1660 /* if socket_id is invalid, set to 0 */
1661 if (check_socket_id(socket_id) < 0)
/* Build the default configuration for the newly appeared port. */
1663 reconfig(pi, socket_id);
1664 rte_eth_promiscuous_enable(pi);
/* Refresh the global port count now that a device was added. */
1666 nb_ports = rte_eth_dev_count();
1668 ports[pi].port_status = RTE_PORT_STOPPED;
1670 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * detach_port(): hot-unplug @port_id.  The port must already be closed;
 * any remaining flow rules are flushed before the device is detached.
 */
1675 detach_port(uint8_t port_id)
1677 char name[RTE_ETH_NAME_MAX_LEN];
1679 printf("Detaching a port...\n");
1681 if (!port_is_closed(port_id)) {
1682 printf("Please close port first\n");
1686 if (ports[port_id].flow_list)
1687 port_flow_flush(port_id);
/* rte_eth_dev_detach() returns the device name it removed in @name. */
1689 if (rte_eth_dev_detach(port_id, name))
1692 nb_ports = rte_eth_dev_count();
1694 printf("Port '%s' is detached. Now total ports is %d\n",
/*
 * NOTE(review): the enclosing function header is not visible in this
 * extract; this appears to be the testpmd shutdown path (pmd_test_exit):
 * stop forwarding, then shut down every known port before exiting --
 * confirm against the full source.
 */
1706 stop_packet_forwarding();
1708 if (ports != NULL) {
1710 RTE_ETH_FOREACH_DEV(pt_id) {
1711 printf("\nShutting down port %d...\n", pt_id);
1717 printf("\nBye...\n");
/* Console command dispatch: a command name bound to a handler function. */
1720 typedef void (*cmd_func_t)(void);
1721 struct pmd_test_command {
1722 const char *cmd_name;
1723 cmd_func_t cmd_func;
/* Number of entries in the pmd_test_menu command table. */
1726 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1728 /* Check the link status of all ports in up to 9s, and print them finally */
1730 check_all_ports_link_status(uint32_t port_mask)
1732 #define CHECK_INTERVAL 100 /* 100ms */
1733 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1734 uint8_t portid, count, all_ports_up, print_flag = 0;
1735 struct rte_eth_link link;
1737 printf("Checking link statuses...\n");
/* Poll every masked port until all links are up or the timeout hits. */
1739 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1741 RTE_ETH_FOREACH_DEV(portid) {
1742 if ((port_mask & (1 << portid)) == 0)
1744 memset(&link, 0, sizeof(link));
/* Non-blocking query; may report a transiently down link. */
1745 rte_eth_link_get_nowait(portid, &link);
1746 /* print link status if flag set */
1747 if (print_flag == 1) {
1748 if (link.link_status)
1749 printf("Port %d Link Up - speed %u "
1750 "Mbps - %s\n", (uint8_t)portid,
1751 (unsigned)link.link_speed,
1752 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1753 ("full-duplex") : ("half-duplex\n"));
1755 printf("Port %d Link Down\n",
1759 /* clear all_ports_up flag if any link down */
1760 if (link.link_status == ETH_LINK_DOWN) {
1765 /* after finally printing all link status, get out */
1766 if (print_flag == 1)
/* Not all links are up yet: wait one interval and poll again. */
1769 if (all_ports_up == 0) {
1771 rte_delay_ms(CHECK_INTERVAL);
1774 /* set the print_flag if all ports up or timeout */
1775 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * rmv_event_callback(): deferred handler (scheduled by eth_event_callback
 * via rte_eal_alarm_set) for a device-removal interrupt.  Closes the
 * removed port and detaches its device.  @arg carries the port id cast
 * to a pointer.
 */
1785 rmv_event_callback(void *arg)
1787 struct rte_eth_dev *dev;
1788 struct rte_devargs *da;
1790 uint8_t port_id = (intptr_t)arg;
1792 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1793 dev = &rte_eth_devices[port_id];
1794 da = dev->device->devargs;
1797 close_port(port_id);
/* Rebuild the device name from its devargs so it can be detached by name. */
1798 if (da->type == RTE_DEVTYPE_VIRTUAL)
1799 snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1800 else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1801 rte_pci_device_name(&da->pci.addr, name, sizeof(name));
1802 printf("removing device %s\n", name);
1803 rte_eal_dev_detach(name);
1804 dev->state = RTE_ETH_DEV_UNUSED;
1807 /* This function is used by the interrupt thread */
/*
 * eth_event_callback(): log an ethdev event (when enabled in
 * event_print_mask) and, for a device-removal (RMV) event, schedule
 * rmv_event_callback() via an EAL alarm -- the device cannot be
 * detached from within the interrupt thread itself.
 */
1809 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
/* Human-readable names indexed by rte_eth_event_type. */
1811 static const char * const event_desc[] = {
1812 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1813 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1814 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1815 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1816 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1817 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1818 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1819 [RTE_ETH_EVENT_MAX] = NULL,
1822 RTE_SET_USED(param);
1824 if (type >= RTE_ETH_EVENT_MAX) {
1825 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1826 port_id, __func__, type);
/* Only print events that were enabled in the user-set print mask. */
1828 } else if (event_print_mask & (UINT32_C(1) << type)) {
1829 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1835 case RTE_ETH_EVENT_INTR_RMV:
/* Defer the detach by 100000 us (100 ms). */
1836 if (rte_eal_alarm_set(100000,
1837 rmv_event_callback, (void *)(intptr_t)port_id))
1838 fprintf(stderr, "Could not set up deferred device removal\n");
/*
 * Program the TX-queue -> stats-counter mappings configured on the
 * command line into the NIC registers of @port_id.
 * NOTE(review): the diag error check and return statements are among the
 * lines missing from this extract -- confirm against the full source.
 */
1846 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1850 uint8_t mapping_found = 0;
1852 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1853 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1854 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1855 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1856 tx_queue_stats_mappings[i].queue_id,
1857 tx_queue_stats_mappings[i].stats_counter_id);
/* Remember that at least one mapping applied to this port. */
1864 port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX counterpart of set_tx_queue_stats_mapping_registers(): program the
 * RX-queue -> stats-counter mappings into the NIC registers of @port_id.
 * NOTE(review): the diag error check and return statements are among the
 * lines missing from this extract -- confirm against the full source.
 */
1869 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1873 uint8_t mapping_found = 0;
1875 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1876 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1877 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1878 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1879 rx_queue_stats_mappings[i].queue_id,
1880 rx_queue_stats_mappings[i].stats_counter_id);
/* Remember that at least one mapping applied to this port. */
1887 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue stats mappings to port @pi.  A -ENOTSUP
 * return from the PMD merely disables the feature for that direction;
 * any other error aborts testpmd via rte_exit().
 */
1892 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1896 diag = set_tx_queue_stats_mapping_registers(pi, port);
1898 if (diag == -ENOTSUP) {
1899 port->tx_queue_stats_mapping_enabled = 0;
1900 printf("TX queue stats mapping not supported port id=%d\n", pi);
1903 rte_exit(EXIT_FAILURE,
1904 "set_tx_queue_stats_mapping_registers "
1905 "failed for port id=%d diag=%d\n",
1909 diag = set_rx_queue_stats_mapping_registers(pi, port);
1911 if (diag == -ENOTSUP) {
1912 port->rx_queue_stats_mapping_enabled = 0;
1913 printf("RX queue stats mapping not supported port id=%d\n", pi);
1916 rte_exit(EXIT_FAILURE,
1917 "set_rx_queue_stats_mapping_registers "
1918 "failed for port id=%d diag=%d\n",
1924 rxtx_port_config(struct rte_port *port)
1926 port->rx_conf = port->dev_info.default_rxconf;
1927 port->tx_conf = port->dev_info.default_txconf;
1929 /* Check if any RX/TX parameters have been passed */
1930 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1931 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1933 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1934 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1936 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1937 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1939 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1940 port->rx_conf.rx_free_thresh = rx_free_thresh;
1942 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1943 port->rx_conf.rx_drop_en = rx_drop_en;
1945 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1946 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1948 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1949 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1951 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1952 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1954 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1955 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1957 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1958 port->tx_conf.tx_free_thresh = tx_free_thresh;
1960 if (txq_flags != RTE_PMD_PARAM_UNSET)
1961 port->tx_conf.txq_flags = txq_flags;
/*
 * init_port_config(): build the default rte_eth_conf for every probed
 * port: RX mode, flow-director config, RSS (only when meaningful), queue
 * thresholds, stats mappings, and LSC/RMV interrupt enables.
 * NOTE(review): the loop's port = &ports[pid] assignment and the
 * if/else around the two RSS settings are among the lines missing from
 * this extract.
 */
1965 init_port_config(void)
1968 struct rte_port *port;
1970 RTE_ETH_FOREACH_DEV(pid) {
1972 port->dev_conf.rxmode = rx_mode;
1973 port->dev_conf.fdir_conf = fdir_conf;
/* Multi-RX-queue case: enable RSS with the configured hash functions. */
1975 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1976 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
/* Single-RX-queue case: no RSS hashing. */
1978 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1979 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* Outside DCB mode, pick plain RSS or no multi-queue at all. */
1982 if (port->dcb_flag == 0) {
1983 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1984 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1986 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1989 rxtx_port_config(port);
1991 rte_eth_macaddr_get(pid, &port->eth_addr);
1993 map_port_queue_stats_mapping_registers(pid, port);
1994 #ifdef RTE_NIC_BYPASS
1995 rte_eth_dev_bypass_init(pid);
/* Enable link-state-change interrupts only when the PMD supports them. */
1998 if (lsc_interrupt &&
1999 (rte_eth_devices[pid].data->dev_flags &
2000 RTE_ETH_DEV_INTR_LSC))
2001 port->dev_conf.intr_conf.lsc = 1;
/* Likewise for device-removal interrupts. */
2002 if (rmv_interrupt &&
2003 (rte_eth_devices[pid].data->dev_flags &
2004 RTE_ETH_DEV_INTR_RMV))
2005 port->dev_conf.intr_conf.rmv = 1;
2009 void set_port_slave_flag(portid_t slave_pid)
2011 struct rte_port *port;
2013 port = &ports[slave_pid];
2014 port->slave_flag = 1;
2017 void clear_port_slave_flag(portid_t slave_pid)
2019 struct rte_port *port;
2021 port = &ports[slave_pid];
2022 port->slave_flag = 0;
2025 uint8_t port_is_bonding_slave(portid_t slave_pid)
2027 struct rte_port *port;
2029 port = &ports[slave_pid];
2030 return port->slave_flag;
/* VLAN ids used to spread traffic across the VMDQ pools in DCB+VT mode. */
2033 const uint16_t vlan_tags[] = {
2034 0, 1, 2, 3, 4, 5, 6, 7,
2035 8, 9, 10, 11, 12, 13, 14, 15,
2036 16, 17, 18, 19, 20, 21, 22, 23,
2037 24, 25, 26, 27, 28, 29, 30, 31
/*
 * get_eth_dcb_conf(): fill @eth_conf for DCB operation.
 * DCB_VT_ENABLED selects VMDQ+DCB (pools mapped from the vlan_tags table
 * above); otherwise plain DCB with RSS on RX.  @num_tcs selects 4 or 8
 * traffic classes; @pfc_en toggles priority-flow-control capability.
 * NOTE(review): the "ð_conf" tokens below are a mojibake of "&eth_conf"
 * (the "&eth" prefix was decoded as an HTML entity) -- restore before
 * compiling.
 */
2041 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2042 enum dcb_mode_enable dcb_mode,
2043 enum rte_eth_nb_tcs num_tcs,
2049 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2050 * given above, and the number of traffic classes available for use.
2052 if (dcb_mode == DCB_VT_ENABLED) {
2053 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2054 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2055 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2056 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2058 /* VMDQ+DCB RX and TX configurations */
2059 vmdq_rx_conf->enable_default_pool = 0;
2060 vmdq_rx_conf->default_pool = 0;
/* 4 TCs -> 32 pools, 8 TCs -> 16 pools (queue budget is fixed). */
2061 vmdq_rx_conf->nb_queue_pools =
2062 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2063 vmdq_tx_conf->nb_queue_pools =
2064 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* Map each VLAN tag onto a pool, round-robin over the pools. */
2066 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2067 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2068 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2069 vmdq_rx_conf->pool_map[i].pools =
2070 1 << (i % vmdq_rx_conf->nb_queue_pools);
/* Identity mapping of user priority -> traffic class. */
2072 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2073 vmdq_rx_conf->dcb_tc[i] = i;
2074 vmdq_tx_conf->dcb_tc[i] = i;
2077 /* set DCB mode of RX and TX of multiple queues */
2078 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2079 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
/* Non-VT branch: plain DCB, RSS spreads flows inside each TC. */
2081 struct rte_eth_dcb_rx_conf *rx_conf =
2082 ð_conf->rx_adv_conf.dcb_rx_conf;
2083 struct rte_eth_dcb_tx_conf *tx_conf =
2084 ð_conf->tx_adv_conf.dcb_tx_conf;
2086 rx_conf->nb_tcs = num_tcs;
2087 tx_conf->nb_tcs = num_tcs;
/* Fold the 8 user priorities onto the available traffic classes. */
2089 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2090 rx_conf->dcb_tc[i] = i % num_tcs;
2091 tx_conf->dcb_tc[i] = i % num_tcs;
2093 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2094 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2095 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
/* Advertise priority-group support, plus PFC when @pfc_en is set. */
2099 eth_conf->dcb_capability_en =
2100 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2102 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * init_port_dcb_config(): switch port @pid into DCB mode.  Builds a DCB
 * rte_eth_conf via get_eth_dcb_conf(), probes the device to size the
 * RX/TX queue counts, installs the VLAN filter table and marks the port
 * as DCB-configured.  The port is expected to be reconfigured/started by
 * the caller afterwards.
 */
2108 init_port_dcb_config(portid_t pid,
2109 enum dcb_mode_enable dcb_mode,
2110 enum rte_eth_nb_tcs num_tcs,
2113 struct rte_eth_conf port_conf;
2114 struct rte_port *rte_port;
2118 rte_port = &ports[pid];
2120 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2121 /* Enter DCB configuration status */
2124 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2125 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
/* DCB relies on VLANs, so HW VLAN filtering must be on. */
2128 port_conf.rxmode.hw_vlan_filter = 1;
2131 * Write the configuration into the device.
2132 * Set the numbers of RX & TX queues to 0, so
2133 * the RX & TX queues will not be setup.
2135 (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2137 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2139 /* If dev_info.vmdq_pool_base is greater than 0,
2140 * the queue id of vmdq pools is started after pf queues.
2142 if (dcb_mode == DCB_VT_ENABLED &&
2143 rte_port->dev_info.vmdq_pool_base > 0) {
2144 printf("VMDQ_DCB multi-queue mode is nonsensical"
2145 " for port %d.", pid);
2149 /* Assume the ports in testpmd have the same dcb capability
2150 * and has the same number of rxq and txq in dcb mode
2152 if (dcb_mode == DCB_VT_ENABLED) {
/* With VFs present, stay within the queues already carved out. */
2153 if (rte_port->dev_info.max_vfs > 0) {
2154 nb_rxq = rte_port->dev_info.nb_rx_queues;
2155 nb_txq = rte_port->dev_info.nb_tx_queues;
2157 nb_rxq = rte_port->dev_info.max_rx_queues;
2158 nb_txq = rte_port->dev_info.max_tx_queues;
2161 /*if vt is disabled, use all pf queues */
2162 if (rte_port->dev_info.vmdq_pool_base == 0) {
2163 nb_rxq = rte_port->dev_info.max_rx_queues;
2164 nb_txq = rte_port->dev_info.max_tx_queues;
/* One queue per traffic class otherwise. */
2166 nb_rxq = (queueid_t)num_tcs;
2167 nb_txq = (queueid_t)num_tcs;
2171 rx_free_thresh = 64;
/* Commit the DCB configuration and re-derive the queue config. */
2173 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2175 rxtx_port_config(rte_port);
2177 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
/* Accept every VLAN id used by the DCB/VMDQ pool mapping. */
2178 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2179 rx_vft_set(pid, vlan_tags[i], 1);
2181 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2182 map_port_queue_stats_mapping_registers(pid, rte_port);
2184 rte_port->dcb_flag = 1;
/*
 * NOTE(review): the enclosing function header (init_port) is not visible
 * in this extract -- confirm.  Allocates the global ports[] array: one
 * zeroed, cache-aligned rte_port per possible ethdev, aborting on OOM.
 */
2192 /* Configuration of Ethernet ports. */
2193 ports = rte_zmalloc("testpmd: ports",
2194 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2195 RTE_CACHE_LINE_SIZE);
2196 if (ports == NULL) {
2197 rte_exit(EXIT_FAILURE,
2198 "rte_zmalloc(%d struct rte_port) failed\n",
/*
 * signal_handler(): on SIGINT/SIGTERM, tear down the optional subsystems
 * (pdump packet capture, latency stats), then re-raise the signal with
 * the default handler so the process exits with the conventional
 * signal-terminated status.
 */
2211 signal_handler(int signum)
2213 if (signum == SIGINT || signum == SIGTERM) {
2214 printf("\nSignal %d received, preparing to exit...\n",
2216 #ifdef RTE_LIBRTE_PDUMP
2217 /* uninitialize packet capture framework */
2220 #ifdef RTE_LIBRTE_LATENCY_STATS
2221 rte_latencystats_uninit();
2224 /* exit with the expected status */
2225 signal(signum, SIG_DFL);
2226 kill(getpid(), signum);
2231 main(int argc, char** argv)
2236 signal(SIGINT, signal_handler);
2237 signal(SIGTERM, signal_handler);
2239 diag = rte_eal_init(argc, argv);
2241 rte_panic("Cannot init EAL\n");
2243 #ifdef RTE_LIBRTE_PDUMP
2244 /* initialize packet capture framework */
2245 rte_pdump_init(NULL);
2248 nb_ports = (portid_t) rte_eth_dev_count();
2250 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2252 /* allocate port structures, and init them */
2255 set_def_fwd_config();
2257 rte_panic("Empty set of forwarding logical cores - check the "
2258 "core mask supplied in the command parameters\n");
2260 /* Bitrate/latency stats disabled by default */
2261 #ifdef RTE_LIBRTE_BITRATE
2262 bitrate_enabled = 0;
2264 #ifdef RTE_LIBRTE_LATENCY_STATS
2265 latencystats_enabled = 0;
2271 launch_args_parse(argc, argv);
2273 if (!nb_rxq && !nb_txq)
2274 printf("Warning: Either rx or tx queues should be non-zero\n");
2276 if (nb_rxq > 1 && nb_rxq > nb_txq)
2277 printf("Warning: nb_rxq=%d enables RSS configuration, "
2278 "but nb_txq=%d will prevent to fully test it.\n",
2282 if (start_port(RTE_PORT_ALL) != 0)
2283 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2285 /* set all ports to promiscuous mode by default */
2286 RTE_ETH_FOREACH_DEV(port_id)
2287 rte_eth_promiscuous_enable(port_id);
2289 /* Init metrics library */
2290 rte_metrics_init(rte_socket_id());
2292 #ifdef RTE_LIBRTE_LATENCY_STATS
2293 if (latencystats_enabled != 0) {
2294 int ret = rte_latencystats_init(1, NULL);
2296 printf("Warning: latencystats init()"
2297 " returned error %d\n", ret);
2298 printf("Latencystats running on lcore %d\n",
2299 latencystats_lcore_id);
2303 /* Setup bitrate stats */
2304 #ifdef RTE_LIBRTE_BITRATE
2305 if (bitrate_enabled != 0) {
2306 bitrate_data = rte_stats_bitrate_create();
2307 if (bitrate_data == NULL)
2308 rte_exit(EXIT_FAILURE,
2309 "Could not allocate bitrate data.\n");
2310 rte_stats_bitrate_reg(bitrate_data);
2314 #ifdef RTE_LIBRTE_CMDLINE
2315 if (strlen(cmdline_filename) != 0)
2316 cmdline_read_from_file(cmdline_filename);
2318 if (interactive == 1) {
2320 printf("Start automatic packet forwarding\n");
2321 start_packet_forwarding(0);
2331 printf("No commandline core given, start packet forwarding\n");
2332 start_packet_forwarding(0);
2333 printf("Press enter to exit\n");
2334 rc = read(0, &c, 1);