/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>
#include <limits.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */
/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;
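/*
 * Example: "--socket-num=1" forces all UMA allocations onto socket 1,
 * while "--no-numa" disables the per-socket dispatch described above
 * (both are existing testpmd command-line parameters).
 */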
/*
 * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
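/*
 * Example: with nb_ports = 2 and nb_rxq = nb_txq = 4, init_fwd_streams()
 * below allocates nb_ports * max(nb_rxq, nb_txq) = 8 forwarding streams.
 */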
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode (0 means not in DCB mode). */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
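/*
 * Example: the defaults above can be overridden at startup with the
 * "--rxd=N" and "--txd=N" command-line parameters, or at runtime with
 * "port config all rxd N" / "port config all txd N".
 */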
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */
/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
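/*
 * Example: to also silence link status change events, clear the LSC bit:
 *
 *	event_print_mask &= ~(UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC);
 */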
/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];
#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static void eth_event_callback(uint8_t port_id,
			       enum rte_eth_event_type type,
			       void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
/*
 * Helper function to check whether a socket is newly discovered:
 * if yes, return a positive value; if not, return zero.
 */
static int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
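/*
 * Example call, as done from init_config() below: create the default
 * pool on socket 0 with the standard segment size:
 *
 *	mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
 */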
/*
 * Check whether a given socket id is valid in NUMA mode:
 * if valid, return 0, else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
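	/*
	 * Worked example, assuming RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX
	 * = 2048, MAX_PKT_BURST = 512, mb_mempool_cache = 250 and 4 lcores:
	 * 2048 + (4 * 250) + 2048 + 512 = 5608 mbufs, then multiplied by
	 * RTE_MAX_ETHPORTS to cover every potential port.
	 */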
	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
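/*
 * Example output from the function above:
 *   RX-bursts : 1000 [90% of 32 pkts + 10% of others]
 * i.e. 90% of the received bursts contained exactly 32 packets.
 */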
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;
	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#else
	printf("\n");
#endif
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the below do-while
				 * loop if rte_eth_rx_burst() always returns
				 * a nonzero number of packets, so a timer is
				 * used to exit the loop after a 1 second
				 * timeout.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
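/*
 * Note: the flush above runs two passes (j < 2) with a 10 ms pause between
 * them, so packets still in flight when the first pass drains the queues
 * are presumably caught by the second one.
 */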
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
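/*
 * Usage: callers pass either start_pkt_forward_on_core (continuous
 * forwarding) or run_one_txonly_burst_on_core (single TXONLY burst),
 * as start_packet_forwarding() does below.
 */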
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* No port is left unstarted */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
			(port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		for (event_type = RTE_ETH_EVENT_UNKNOWN;
		     event_type < RTE_ETH_EVENT_MAX;
		     event_type++) {
			diag = rte_eth_dev_callback_register(pi,
							event_type,
							eth_event_callback,
							NULL);
			if (diag) {
				printf("Failed to setup event callback for event %d\n",
					event_type);
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name))
		return;

	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
			name, nb_ports);
	printf("Done\n");
	return;
}
void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}
static void
rmv_event_callback(void *arg)
{
	struct rte_eth_dev *dev;
	struct rte_devargs *da;
	char name[32] = "";
	uint8_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	da = dev->device->devargs;

	stop_port(port_id);
	close_port(port_id);
	if (da->type == RTE_DEVTYPE_VIRTUAL)
		snprintf(name, sizeof(name), "%s", da->virt.drv_name);
	else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
		rte_pci_device_name(&da->pci.addr, name, sizeof(name));
	printf("removing device %s\n", name);
	rte_eal_dev_detach(name);
	dev->state = RTE_ETH_DEV_UNUSED;
}
/* This function is used by the interrupt thread */
static void
eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu8 ": %s event\n", port_id,
			event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
}
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}
const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
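/*
 * Example: in VMDQ+DCB mode with 4 TCs, get_eth_dcb_conf() below maps
 * each of these 32 VLAN tags to one of 32 pools, i.e. vlan_tags[i] is
 * steered to pool (i % nb_queue_pools).
 */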
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
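/*
 * Example: get_eth_dcb_conf(&conf, DCB_ENABLED, ETH_4_TCS, 1) yields a
 * plain DCB setup with 4 TCs, user priorities 0..7 mapped to TC (i % 4),
 * RSS enabled (ETH_MQ_RX_DCB_RSS) and PFC advertised in dcb_capability_en.
 */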
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
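/*
 * Typical caller: the "port config (port-id) dcb vt (on|off) (num-tcs)
 * pfc (on|off)" runtime command ends up here with dcb_mode, num_tcs and
 * pfc_en parsed from the command line (see cmdline.c).
 */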
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}