4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
79 #ifdef RTE_LIBRTE_PMD_XENVIRT
80 #include <rte_eth_xenvirt.h>
82 #ifdef RTE_LIBRTE_PDUMP
83 #include <rte_pdump.h>
86 #include <rte_metrics.h>
87 #ifdef RTE_LIBRTE_BITRATE
88 #include <rte_bitrate.h>
90 #ifdef RTE_LIBRTE_LATENCY_STATS
91 #include <rte_latencystats.h>
97 uint16_t verbose_level = 0; /**< Silent by default. */
99 /* use master core for command line ? */
100 uint8_t interactive = 0;
101 uint8_t auto_start = 0;
103 char cmdline_filename[PATH_MAX] = {0};
106 * NUMA support configuration.
107 * When set, the NUMA support attempts to dispatch the allocation of the
108 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
109 * probed ports among the CPU sockets 0 and 1.
110 * Otherwise, all memory is allocated from CPU socket 0.
112 uint8_t numa_support = 1; /**< numa enabled by default */
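/*
 * Editor's note (illustrative, not part of the original sources): NUMA-aware
 * placement mainly affects the mbuf pools and queue setup below. With
 * numa_support enabled, mbuf_pool_create() is called once per discovered CPU
 * socket; with it disabled, a single pool is created on socket 0 or on the
 * socket selected with --socket-num. A hypothetical invocation forcing UMA
 * behaviour could look like:
 *
 *   testpmd -l 0-3 -n 4 -- --no-numa --socket-num=0
 */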
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not set.
118 uint8_t socket_num = UMA_NO_CONFIG;
 * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
 * Record the Ethernet address of peer target ports to which packets are forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator ports.
131 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
132 portid_t nb_peer_eth_addrs = 0;
135 * Probed Target Environment.
137 struct rte_port *ports; /**< For all probed ethernet ports. */
138 portid_t nb_ports; /**< Number of probed ethernet ports. */
139 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
140 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
143 * Test Forwarding Configuration.
144 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
145 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
147 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
148 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
149 portid_t nb_cfg_ports; /**< Number of configured ports. */
150 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
152 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
153 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
155 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * max(nb_rxq, nb_txq)). */
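/*
 * Worked example (editor's note): with 2 probed ports, nb_rxq = 4 and
 * nb_txq = 2, init_fwd_streams() below allocates
 * nb_fwd_streams = nb_ports * max(nb_rxq, nb_txq) = 2 * 4 = 8 streams,
 * which are then distributed over the configured forwarding lcores.
 */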
159 * Forwarding engines.
161 struct fwd_engine * fwd_engines[] = {
170 #ifdef RTE_LIBRTE_IEEE1588
171 &ieee1588_fwd_engine,
176 struct fwd_config cur_fwd_config;
177 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
178 uint32_t retry_enabled;
179 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
180 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
182 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
183 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
184 * specified on command-line. */
185 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
187 * Configuration of packet segments used by the "txonly" processing engine.
189 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
190 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
191 TXONLY_DEF_PACKET_LEN,
193 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
195 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
196 /**< Split policy for packets to TX. */
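/*
 * Example (editor's note): by default TXONLY sends single-segment packets of
 * TXONLY_DEF_PACKET_LEN bytes. Assuming the usual "set txpkts" runtime
 * command, e.g.
 *   testpmd> set txpkts 64,128
 * the layout becomes tx_pkt_nb_segs = 2, tx_pkt_seg_lengths = {64, 128} and
 * tx_pkt_length = 192.
 */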
198 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
199 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;
208 * Configurable number of RX/TX queues.
210 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
211 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
214 * Configurable number of RX/TX ring descriptors.
216 #define RTE_TEST_RX_DESC_DEFAULT 128
217 #define RTE_TEST_TX_DESC_DEFAULT 512
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
221 #define RTE_PMD_PARAM_UNSET -1
223 * Configurable values of RX and TX ring threshold registers.
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
235 * Configurable value of RX free threshold.
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
240 * Configurable value of RX drop enable.
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
245 * Configurable value of TX free threshold.
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
250 * Configurable value of TX RS bit threshold.
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
255 * Configurable value of TX queue flags.
257 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
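/*
 * The RTE_PMD_PARAM_UNSET sentinel means "keep the PMD default". A minimal
 * sketch of how these values are consumed (see rxtx_port_config() below):
 *
 *   if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
 *           port->tx_conf.tx_rs_thresh = tx_rs_thresh;
 *
 * i.e. a threshold is written into the per-port configuration only when the
 * user overrode it on the command line.
 */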
260 * Receive Side Scaling (RSS) configuration.
262 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
265 * Port topology configuration
267 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
 * Avoid flushing all the RX streams before starting forwarding.
272 uint8_t no_flush_rx = 0; /* flush by default */
 * Avoid checking the link status when starting/stopping a port.
277 uint8_t no_link_check = 0; /* check by default */
280 * Enable link status change notification
282 uint8_t lsc_interrupt = 1; /* enabled by default */
285 * Enable device removal notification.
287 uint8_t rmv_interrupt = 1; /* enabled by default */
 * Display or mask ether events.
 * Defaults to all events except VF_MBOX.
293 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
294 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
295 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
296 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
297 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
298 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
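/*
 * Editor's sketch: eth_event_callback() below prints an event only when its
 * bit is set in this mask, i.e.
 *
 *   if (event_print_mask & (UINT32_C(1) << type))
 *           printf(...);
 *
 * To also report VF mailbox events, the corresponding bit would be added:
 *   event_print_mask |= UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX;
 */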
301 * NIC bypass mode configuration options.
304 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
305 /* The NIC bypass watchdog timeout. */
306 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
310 #ifdef RTE_LIBRTE_LATENCY_STATS
 * Set when latency stats are enabled on the command line.
315 uint8_t latencystats_enabled;
 * Lcore ID that serves latency statistics.
320 lcoreid_t latencystats_lcore_id = -1;
325 * Ethernet device configuration.
327 struct rte_eth_rxmode rx_mode = {
328 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
330 .header_split = 0, /**< Header Split disabled. */
331 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
332 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
333 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
334 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
335 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
336 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
339 struct rte_fdir_conf fdir_conf = {
340 .mode = RTE_FDIR_MODE_NONE,
341 .pballoc = RTE_FDIR_PBALLOC_64K,
342 .status = RTE_FDIR_REPORT_STATUS,
344 .vlan_tci_mask = 0x0,
346 .src_ip = 0xFFFFFFFF,
347 .dst_ip = 0xFFFFFFFF,
350 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
351 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
353 .src_port_mask = 0xFFFF,
354 .dst_port_mask = 0xFFFF,
355 .mac_addr_byte_mask = 0xFF,
356 .tunnel_type_mask = 1,
357 .tunnel_id_mask = 0xFFFFFFFF,
362 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
364 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
365 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
367 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
368 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
370 uint16_t nb_tx_queue_stats_mappings = 0;
371 uint16_t nb_rx_queue_stats_mappings = 0;
373 unsigned int num_sockets = 0;
374 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
376 #ifdef RTE_LIBRTE_BITRATE
377 /* Bitrate statistics */
378 struct rte_stats_bitrates *bitrate_data;
379 lcoreid_t bitrate_lcore_id;
380 uint8_t bitrate_enabled;
383 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
385 /* Forward function declarations */
386 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
387 static void check_all_ports_link_status(uint32_t port_mask);
388 static int eth_event_callback(uint8_t port_id,
389 enum rte_eth_event_type type,
390 void *param, void *ret_param);
393 * Check if all the ports are started.
394 * If yes, return positive value. If not, return zero.
396 static int all_ports_started(void);
 * Helper function to check whether a socket id is new (not yet discovered).
 * If it is new, return a positive value; otherwise return zero.
403 new_socket_id(unsigned int socket_id)
407 for (i = 0; i < num_sockets; i++) {
408 if (socket_ids[i] == socket_id)
415 * Setup default configuration.
418 set_default_fwd_lcores_config(void)
422 unsigned int sock_num;
425 for (i = 0; i < RTE_MAX_LCORE; i++) {
426 sock_num = rte_lcore_to_socket_id(i);
427 if (new_socket_id(sock_num)) {
428 if (num_sockets >= RTE_MAX_NUMA_NODES) {
429 rte_exit(EXIT_FAILURE,
430 "Total sockets greater than %u\n",
433 socket_ids[num_sockets++] = sock_num;
435 if (!rte_lcore_is_enabled(i))
437 if (i == rte_get_master_lcore())
439 fwd_lcores_cpuids[nb_lc++] = i;
441 nb_lcores = (lcoreid_t) nb_lc;
442 nb_cfg_lcores = nb_lcores;
447 set_def_peer_eth_addrs(void)
451 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
452 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
453 peer_eth_addrs[i].addr_bytes[5] = i;
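/*
 * Worked example (editor's note): assuming ETHER_LOCAL_ADMIN_ADDR is 0x02,
 * port 3 gets the default peer address 02:00:00:00:00:03, i.e. a locally
 * administered MAC address whose last byte is the port index.
 */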
458 set_default_fwd_ports_config(void)
462 for (pt_id = 0; pt_id < nb_ports; pt_id++)
463 fwd_ports_ids[pt_id] = pt_id;
465 nb_cfg_ports = nb_ports;
466 nb_fwd_ports = nb_ports;
470 set_def_fwd_config(void)
472 set_default_fwd_lcores_config();
473 set_def_peer_eth_addrs();
474 set_default_fwd_ports_config();
478 * Configuration initialisation done once at init time.
481 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
482 unsigned int socket_id)
484 char pool_name[RTE_MEMPOOL_NAMESIZE];
485 struct rte_mempool *rte_mp = NULL;
488 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
489 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
492 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
493 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
495 #ifdef RTE_LIBRTE_PMD_XENVIRT
496 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
497 (unsigned) mb_mempool_cache,
498 sizeof(struct rte_pktmbuf_pool_private),
499 rte_pktmbuf_pool_init, NULL,
500 rte_pktmbuf_init, NULL,
504 /* if the former XEN allocation failed fall back to normal allocation */
505 if (rte_mp == NULL) {
507 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
508 mb_size, (unsigned) mb_mempool_cache,
509 sizeof(struct rte_pktmbuf_pool_private),
514 if (rte_mempool_populate_anon(rte_mp) == 0) {
515 rte_mempool_free(rte_mp);
519 rte_pktmbuf_pool_init(rte_mp, NULL);
520 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
522 /* wrapper to rte_mempool_create() */
523 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
524 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
529 if (rte_mp == NULL) {
530 rte_exit(EXIT_FAILURE,
531 "Creation of mbuf pool for socket %u failed: %s\n",
532 socket_id, rte_strerror(rte_errno));
533 } else if (verbose_level > 0) {
534 rte_mempool_dump(stdout, rte_mp);
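/*
 * Editor's note: mbuf_poolname_build() encodes the socket id into the pool
 * name, so mbuf_pool_find() (used when binding lcores to pools in
 * init_config() below) can later look a pool up by socket id alone; the
 * exact name format is defined in testpmd.h.
 */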
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
543 check_socket_id(const unsigned int socket_id)
545 static int warning_once = 0;
547 if (new_socket_id(socket_id)) {
548 if (!warning_once && numa_support)
549 printf("Warning: NUMA should be configured manually by"
550 " using --port-numa-config and"
551 " --ring-numa-config parameters along with"
563 struct rte_port *port;
564 struct rte_mempool *mbp;
565 unsigned int nb_mbuf_per_pool;
567 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
569 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
572 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
573 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
574 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
577 /* Configuration of logical cores. */
578 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
579 sizeof(struct fwd_lcore *) * nb_lcores,
580 RTE_CACHE_LINE_SIZE);
581 if (fwd_lcores == NULL) {
582 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
583 "failed\n", nb_lcores);
585 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
586 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
587 sizeof(struct fwd_lcore),
588 RTE_CACHE_LINE_SIZE);
589 if (fwd_lcores[lc_id] == NULL) {
590 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
593 fwd_lcores[lc_id]->cpuid_idx = lc_id;
596 RTE_ETH_FOREACH_DEV(pid) {
598 rte_eth_dev_info_get(pid, &port->dev_info);
601 if (port_numa[pid] != NUMA_NO_CONFIG)
602 port_per_socket[port_numa[pid]]++;
604 uint32_t socket_id = rte_eth_dev_socket_id(pid);
606 /* if socket_id is invalid, set to 0 */
607 if (check_socket_id(socket_id) < 0)
609 port_per_socket[socket_id]++;
613 /* set flag to initialize port/queue */
614 port->need_reconfig = 1;
615 port->need_reconfig_queues = 1;
619 * Create pools of mbuf.
620 * If NUMA support is disabled, create a single pool of mbuf in
621 * socket 0 memory by default.
622 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
624 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
625 * nb_txd can be configured at run time.
627 if (param_total_num_mbufs)
628 nb_mbuf_per_pool = param_total_num_mbufs;
630 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
631 (nb_lcores * mb_mempool_cache) +
632 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
633 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
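/*
 * Worked example (editor's note, constants assumed from testpmd.h defaults
 * of this era: RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
 * MAX_PKT_BURST = 512, mb_mempool_cache = 250): with 4 forwarding lcores the
 * per-pool estimate is 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, scaled by
 * RTE_MAX_ETHPORTS when no --total-num-mbufs value was given.
 */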
639 for (i = 0; i < num_sockets; i++)
640 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
643 if (socket_num == UMA_NO_CONFIG)
644 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
646 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
 * Record which mbuf pool each logical core should use, if needed.
655 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
656 mbp = mbuf_pool_find(
657 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
660 mbp = mbuf_pool_find(0);
661 fwd_lcores[lc_id]->mbp = mbp;
664 /* Configuration of packet forwarding streams. */
665 if (init_fwd_streams() < 0)
666 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
673 reconfig(portid_t new_port_id, unsigned socket_id)
675 struct rte_port *port;
677 /* Reconfiguration of Ethernet ports. */
678 port = &ports[new_port_id];
679 rte_eth_dev_info_get(new_port_id, &port->dev_info);
681 /* set flag to initialize port/queue */
682 port->need_reconfig = 1;
683 port->need_reconfig_queues = 1;
684 port->socket_id = socket_id;
691 init_fwd_streams(void)
694 struct rte_port *port;
695 streamid_t sm_id, nb_fwd_streams_new;
698 /* set socket id according to numa or not */
699 RTE_ETH_FOREACH_DEV(pid) {
701 if (nb_rxq > port->dev_info.max_rx_queues) {
702 printf("Fail: nb_rxq(%d) is greater than "
703 "max_rx_queues(%d)\n", nb_rxq,
704 port->dev_info.max_rx_queues);
707 if (nb_txq > port->dev_info.max_tx_queues) {
708 printf("Fail: nb_txq(%d) is greater than "
709 "max_tx_queues(%d)\n", nb_txq,
710 port->dev_info.max_tx_queues);
714 if (port_numa[pid] != NUMA_NO_CONFIG)
715 port->socket_id = port_numa[pid];
717 port->socket_id = rte_eth_dev_socket_id(pid);
719 /* if socket_id is invalid, set to 0 */
720 if (check_socket_id(port->socket_id) < 0)
725 if (socket_num == UMA_NO_CONFIG)
728 port->socket_id = socket_num;
732 q = RTE_MAX(nb_rxq, nb_txq);
734 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
737 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
738 if (nb_fwd_streams_new == nb_fwd_streams)
741 if (fwd_streams != NULL) {
742 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
743 if (fwd_streams[sm_id] == NULL)
745 rte_free(fwd_streams[sm_id]);
746 fwd_streams[sm_id] = NULL;
748 rte_free(fwd_streams);
753 nb_fwd_streams = nb_fwd_streams_new;
754 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
755 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
756 if (fwd_streams == NULL)
757 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
758 "failed\n", nb_fwd_streams);
760 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
761 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
762 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
763 if (fwd_streams[sm_id] == NULL)
764 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
771 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
773 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
775 unsigned int total_burst;
776 unsigned int nb_burst;
777 unsigned int burst_stats[3];
778 uint16_t pktnb_stats[3];
780 int burst_percent[3];
783 * First compute the total number of packet bursts and the
784 * two highest numbers of bursts of the same number of packets.
787 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
788 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
789 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
790 nb_burst = pbs->pkt_burst_spread[nb_pkt];
793 total_burst += nb_burst;
794 if (nb_burst > burst_stats[0]) {
795 burst_stats[1] = burst_stats[0];
796 pktnb_stats[1] = pktnb_stats[0];
797 burst_stats[0] = nb_burst;
798 pktnb_stats[0] = nb_pkt;
801 if (total_burst == 0)
803 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
804 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
805 burst_percent[0], (int) pktnb_stats[0]);
806 if (burst_stats[0] == total_burst) {
810 if (burst_stats[0] + burst_stats[1] == total_burst) {
811 printf(" + %d%% of %d pkts]\n",
812 100 - burst_percent[0], pktnb_stats[1]);
815 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
816 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
817 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
818 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
821 printf(" + %d%% of %d pkts + %d%% of others]\n",
822 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
824 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
827 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
829 struct rte_port *port;
832 static const char *fwd_stats_border = "----------------------";
834 port = &ports[port_id];
835 printf("\n %s Forward statistics for port %-2d %s\n",
836 fwd_stats_border, port_id, fwd_stats_border);
838 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
839 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
841 stats->ipackets, stats->imissed,
842 (uint64_t) (stats->ipackets + stats->imissed));
844 if (cur_fwd_eng == &csum_fwd_engine)
845 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
846 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
847 if ((stats->ierrors + stats->rx_nombuf) > 0) {
848 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
849 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
852 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
854 stats->opackets, port->tx_dropped,
855 (uint64_t) (stats->opackets + port->tx_dropped));
858 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
860 stats->ipackets, stats->imissed,
861 (uint64_t) (stats->ipackets + stats->imissed));
863 if (cur_fwd_eng == &csum_fwd_engine)
864 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
865 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
866 if ((stats->ierrors + stats->rx_nombuf) > 0) {
867 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
868 printf(" RX-nombufs: %14"PRIu64"\n",
872 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
874 stats->opackets, port->tx_dropped,
875 (uint64_t) (stats->opackets + port->tx_dropped));
878 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
880 pkt_burst_stats_display("RX",
881 &port->rx_stream->rx_burst_stats);
883 pkt_burst_stats_display("TX",
884 &port->tx_stream->tx_burst_stats);
887 if (port->rx_queue_stats_mapping_enabled) {
889 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
890 printf(" Stats reg %2d RX-packets:%14"PRIu64
891 " RX-errors:%14"PRIu64
892 " RX-bytes:%14"PRIu64"\n",
893 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
897 if (port->tx_queue_stats_mapping_enabled) {
898 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
899 printf(" Stats reg %2d TX-packets:%14"PRIu64
900 " TX-bytes:%14"PRIu64"\n",
901 i, stats->q_opackets[i], stats->q_obytes[i]);
905 printf(" %s--------------------------------%s\n",
906 fwd_stats_border, fwd_stats_border);
910 fwd_stream_stats_display(streamid_t stream_id)
912 struct fwd_stream *fs;
913 static const char *fwd_top_stats_border = "-------";
915 fs = fwd_streams[stream_id];
916 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
917 (fs->fwd_dropped == 0))
919 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
920 "TX Port=%2d/Queue=%2d %s\n",
921 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
922 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
923 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
924 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
926 /* if checksum mode */
927 if (cur_fwd_eng == &csum_fwd_engine) {
928 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
929 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
932 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
933 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
934 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
939 flush_fwd_rx_queues(void)
941 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
948 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
949 uint64_t timer_period;
951 /* convert to number of cycles */
952 timer_period = rte_get_timer_hz(); /* 1 second timeout */
954 for (j = 0; j < 2; j++) {
955 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
956 for (rxq = 0; rxq < nb_rxq; rxq++) {
957 port_id = fwd_ports_ids[rxp];
 * testpmd can get stuck in the do-while loop below
 * if rte_eth_rx_burst() always returns nonzero
 * packets, so a timer is added to exit this loop
 * after the 1 second timer expires.
964 prev_tsc = rte_rdtsc();
966 nb_rx = rte_eth_rx_burst(port_id, rxq,
967 pkts_burst, MAX_PKT_BURST);
968 for (i = 0; i < nb_rx; i++)
969 rte_pktmbuf_free(pkts_burst[i]);
971 cur_tsc = rte_rdtsc();
972 diff_tsc = cur_tsc - prev_tsc;
973 timer_tsc += diff_tsc;
974 } while ((nb_rx > 0) &&
975 (timer_tsc < timer_period));
979 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
984 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
986 struct fwd_stream **fsm;
989 #ifdef RTE_LIBRTE_BITRATE
990 uint64_t tics_per_1sec;
992 uint64_t tics_current;
993 uint8_t idx_port, cnt_ports;
995 cnt_ports = rte_eth_dev_count();
996 tics_datum = rte_rdtsc();
997 tics_per_1sec = rte_get_timer_hz();
999 fsm = &fwd_streams[fc->stream_idx];
1000 nb_fs = fc->stream_nb;
1002 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1003 (*pkt_fwd)(fsm[sm_id]);
1004 #ifdef RTE_LIBRTE_BITRATE
1005 if (bitrate_enabled != 0 &&
1006 bitrate_lcore_id == rte_lcore_id()) {
1007 tics_current = rte_rdtsc();
1008 if (tics_current - tics_datum >= tics_per_1sec) {
1009 /* Periodic bitrate calculation */
1011 idx_port < cnt_ports;
1013 rte_stats_bitrate_calc(bitrate_data,
1015 tics_datum = tics_current;
1019 #ifdef RTE_LIBRTE_LATENCY_STATS
1020 if (latencystats_enabled != 0 &&
1021 latencystats_lcore_id == rte_lcore_id())
1022 rte_latencystats_update();
1025 } while (! fc->stopped);
1029 start_pkt_forward_on_core(void *fwd_arg)
1031 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1032 cur_fwd_config.fwd_eng->packet_fwd);
1037 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1038 * Used to start communication flows in network loopback test configurations.
1041 run_one_txonly_burst_on_core(void *fwd_arg)
1043 struct fwd_lcore *fwd_lc;
1044 struct fwd_lcore tmp_lcore;
1046 fwd_lc = (struct fwd_lcore *) fwd_arg;
1047 tmp_lcore = *fwd_lc;
1048 tmp_lcore.stopped = 1;
1049 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
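/*
 * Editor's note: this path is taken when forwarding is started with TX first
 * (the --tx-first option, or "start tx_first" from the prompt);
 * start_packet_forwarding() below launches it one or more times as a warm-up
 * before switching to the regular forwarding engine.
 */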
1054 * Launch packet forwarding:
 * - Set up the per-port forwarding context.
 * - Launch logical cores with their forwarding configuration.
1059 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1061 port_fwd_begin_t port_fwd_begin;
1066 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1067 if (port_fwd_begin != NULL) {
1068 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1069 (*port_fwd_begin)(fwd_ports_ids[i]);
1071 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1072 lc_id = fwd_lcores_cpuids[i];
1073 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1074 fwd_lcores[i]->stopped = 0;
1075 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1076 fwd_lcores[i], lc_id);
1078 printf("launch lcore %u failed - diag=%d\n",
1085 * Launch packet forwarding configuration.
1088 start_packet_forwarding(int with_tx_first)
1090 port_fwd_begin_t port_fwd_begin;
1091 port_fwd_end_t port_fwd_end;
1092 struct rte_port *port;
1097 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1098 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1100 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1101 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1103 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1104 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1105 (!nb_rxq || !nb_txq))
1106 rte_exit(EXIT_FAILURE,
1107 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1108 cur_fwd_eng->fwd_mode_name);
1110 if (all_ports_started() == 0) {
1111 printf("Not all ports were started\n");
1114 if (test_done == 0) {
1115 printf("Packet forwarding already started\n");
1119 if (init_fwd_streams() < 0) {
1120 printf("Fail from init_fwd_streams()\n");
1125 for (i = 0; i < nb_fwd_ports; i++) {
1126 pt_id = fwd_ports_ids[i];
1127 port = &ports[pt_id];
1128 if (!port->dcb_flag) {
1129 printf("In DCB mode, all forwarding ports must "
1130 "be configured in this mode.\n");
1134 if (nb_fwd_lcores == 1) {
1135 printf("In DCB mode,the nb forwarding cores "
1136 "should be larger than 1.\n");
1143 flush_fwd_rx_queues();
1146 pkt_fwd_config_display(&cur_fwd_config);
1147 rxtx_config_display();
1149 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1150 pt_id = fwd_ports_ids[i];
1151 port = &ports[pt_id];
1152 rte_eth_stats_get(pt_id, &port->stats);
1153 port->tx_dropped = 0;
1155 map_port_queue_stats_mapping_registers(pt_id, port);
1157 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1158 fwd_streams[sm_id]->rx_packets = 0;
1159 fwd_streams[sm_id]->tx_packets = 0;
1160 fwd_streams[sm_id]->fwd_dropped = 0;
1161 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1162 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1164 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1165 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1166 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1167 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1168 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1170 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1171 fwd_streams[sm_id]->core_cycles = 0;
1174 if (with_tx_first) {
1175 port_fwd_begin = tx_only_engine.port_fwd_begin;
1176 if (port_fwd_begin != NULL) {
1177 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1178 (*port_fwd_begin)(fwd_ports_ids[i]);
1180 while (with_tx_first--) {
1181 launch_packet_forwarding(
1182 run_one_txonly_burst_on_core);
1183 rte_eal_mp_wait_lcore();
1185 port_fwd_end = tx_only_engine.port_fwd_end;
1186 if (port_fwd_end != NULL) {
1187 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1188 (*port_fwd_end)(fwd_ports_ids[i]);
1191 launch_packet_forwarding(start_pkt_forward_on_core);
1195 stop_packet_forwarding(void)
1197 struct rte_eth_stats stats;
1198 struct rte_port *port;
1199 port_fwd_end_t port_fwd_end;
1204 uint64_t total_recv;
1205 uint64_t total_xmit;
1206 uint64_t total_rx_dropped;
1207 uint64_t total_tx_dropped;
1208 uint64_t total_rx_nombuf;
1209 uint64_t tx_dropped;
1210 uint64_t rx_bad_ip_csum;
1211 uint64_t rx_bad_l4_csum;
1212 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1213 uint64_t fwd_cycles;
1215 static const char *acc_stats_border = "+++++++++++++++";
1218 printf("Packet forwarding not started\n");
1221 printf("Telling cores to stop...");
1222 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1223 fwd_lcores[lc_id]->stopped = 1;
1224 printf("\nWaiting for lcores to finish...\n");
1225 rte_eal_mp_wait_lcore();
1226 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1227 if (port_fwd_end != NULL) {
1228 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1229 pt_id = fwd_ports_ids[i];
1230 (*port_fwd_end)(pt_id);
1233 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1236 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1237 if (cur_fwd_config.nb_fwd_streams >
1238 cur_fwd_config.nb_fwd_ports) {
1239 fwd_stream_stats_display(sm_id);
1240 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1241 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1243 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1245 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1248 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1249 tx_dropped = (uint64_t) (tx_dropped +
1250 fwd_streams[sm_id]->fwd_dropped);
1251 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1254 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1255 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1256 fwd_streams[sm_id]->rx_bad_ip_csum);
1257 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1261 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1262 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1263 fwd_streams[sm_id]->rx_bad_l4_csum);
1264 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1267 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1268 fwd_cycles = (uint64_t) (fwd_cycles +
1269 fwd_streams[sm_id]->core_cycles);
1274 total_rx_dropped = 0;
1275 total_tx_dropped = 0;
1276 total_rx_nombuf = 0;
1277 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1278 pt_id = fwd_ports_ids[i];
1280 port = &ports[pt_id];
1281 rte_eth_stats_get(pt_id, &stats);
1282 stats.ipackets -= port->stats.ipackets;
1283 port->stats.ipackets = 0;
1284 stats.opackets -= port->stats.opackets;
1285 port->stats.opackets = 0;
1286 stats.ibytes -= port->stats.ibytes;
1287 port->stats.ibytes = 0;
1288 stats.obytes -= port->stats.obytes;
1289 port->stats.obytes = 0;
1290 stats.imissed -= port->stats.imissed;
1291 port->stats.imissed = 0;
1292 stats.oerrors -= port->stats.oerrors;
1293 port->stats.oerrors = 0;
1294 stats.rx_nombuf -= port->stats.rx_nombuf;
1295 port->stats.rx_nombuf = 0;
1297 total_recv += stats.ipackets;
1298 total_xmit += stats.opackets;
1299 total_rx_dropped += stats.imissed;
1300 total_tx_dropped += port->tx_dropped;
1301 total_rx_nombuf += stats.rx_nombuf;
1303 fwd_port_stats_display(pt_id, &stats);
1305 printf("\n %s Accumulated forward statistics for all ports"
1307 acc_stats_border, acc_stats_border);
1308 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1310 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1312 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1313 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1314 if (total_rx_nombuf > 0)
1315 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1316 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1318 acc_stats_border, acc_stats_border);
1319 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1321 printf("\n CPU cycles/packet=%u (total cycles="
1322 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1323 (unsigned int)(fwd_cycles / total_recv),
1324 fwd_cycles, total_recv);
1326 printf("\nDone.\n");
1331 dev_set_link_up(portid_t pid)
1333 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1334 printf("\nSet link up fail.\n");
1338 dev_set_link_down(portid_t pid)
1340 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1341 printf("\nSet link down fail.\n");
1345 all_ports_started(void)
1348 struct rte_port *port;
1350 RTE_ETH_FOREACH_DEV(pi) {
1352 /* Check if there is a port which is not started */
1353 if ((port->port_status != RTE_PORT_STARTED) &&
1354 (port->slave_flag == 0))
1358 /* No port is not started */
1363 all_ports_stopped(void)
1366 struct rte_port *port;
1368 RTE_ETH_FOREACH_DEV(pi) {
1370 if ((port->port_status != RTE_PORT_STOPPED) &&
1371 (port->slave_flag == 0))
1379 port_is_started(portid_t port_id)
1381 if (port_id_is_invalid(port_id, ENABLED_WARN))
1384 if (ports[port_id].port_status != RTE_PORT_STARTED)
1391 port_is_closed(portid_t port_id)
1393 if (port_id_is_invalid(port_id, ENABLED_WARN))
1396 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1403 start_port(portid_t pid)
1405 int diag, need_check_link_status = -1;
1408 struct rte_port *port;
1409 struct ether_addr mac_addr;
1410 enum rte_eth_event_type event_type;
1412 if (port_id_is_invalid(pid, ENABLED_WARN))
1417 RTE_ETH_FOREACH_DEV(pi) {
1418 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1421 need_check_link_status = 0;
1423 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1424 RTE_PORT_HANDLING) == 0) {
1425 printf("Port %d is now not stopped\n", pi);
1429 if (port->need_reconfig > 0) {
1430 port->need_reconfig = 0;
1432 printf("Configuring Port %d (socket %u)\n", pi,
1434 /* configure port */
1435 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1438 if (rte_atomic16_cmpset(&(port->port_status),
1439 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1440 printf("Port %d can not be set back "
1441 "to stopped\n", pi);
1442 printf("Fail to configure port %d\n", pi);
1443 /* try to reconfigure port next time */
1444 port->need_reconfig = 1;
1448 if (port->need_reconfig_queues > 0) {
1449 port->need_reconfig_queues = 0;
1450 /* setup tx queues */
1451 for (qi = 0; qi < nb_txq; qi++) {
1452 if ((numa_support) &&
1453 (txring_numa[pi] != NUMA_NO_CONFIG))
1454 diag = rte_eth_tx_queue_setup(pi, qi,
1455 nb_txd,txring_numa[pi],
1458 diag = rte_eth_tx_queue_setup(pi, qi,
1459 nb_txd,port->socket_id,
1465 /* Fail to setup tx queue, return */
1466 if (rte_atomic16_cmpset(&(port->port_status),
1468 RTE_PORT_STOPPED) == 0)
1469 printf("Port %d can not be set back "
1470 "to stopped\n", pi);
1471 printf("Fail to configure port %d tx queues\n", pi);
1472 /* try to reconfigure queues next time */
1473 port->need_reconfig_queues = 1;
1476 /* setup rx queues */
1477 for (qi = 0; qi < nb_rxq; qi++) {
1478 if ((numa_support) &&
1479 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1480 struct rte_mempool * mp =
1481 mbuf_pool_find(rxring_numa[pi]);
1483 printf("Failed to setup RX queue:"
1484 "No mempool allocation"
1485 " on the socket %d\n",
1490 diag = rte_eth_rx_queue_setup(pi, qi,
1491 nb_rxd,rxring_numa[pi],
1492 &(port->rx_conf),mp);
1494 struct rte_mempool *mp =
1495 mbuf_pool_find(port->socket_id);
1497 printf("Failed to setup RX queue:"
1498 "No mempool allocation"
1499 " on the socket %d\n",
1503 diag = rte_eth_rx_queue_setup(pi, qi,
1504 nb_rxd,port->socket_id,
1505 &(port->rx_conf), mp);
1510 /* Fail to setup rx queue, return */
1511 if (rte_atomic16_cmpset(&(port->port_status),
1513 RTE_PORT_STOPPED) == 0)
1514 printf("Port %d can not be set back "
1515 "to stopped\n", pi);
1516 printf("Fail to configure port %d rx queues\n", pi);
1517 /* try to reconfigure queues next time */
1518 port->need_reconfig_queues = 1;
1523 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1524 event_type < RTE_ETH_EVENT_MAX;
1526 diag = rte_eth_dev_callback_register(pi,
1531 printf("Failed to setup even callback for event %d\n",
1538 if (rte_eth_dev_start(pi) < 0) {
1539 printf("Fail to start port %d\n", pi);
1541 /* Fail to setup rx queue, return */
1542 if (rte_atomic16_cmpset(&(port->port_status),
1543 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1544 printf("Port %d can not be set back to "
1549 if (rte_atomic16_cmpset(&(port->port_status),
1550 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1551 printf("Port %d can not be set into started\n", pi);
1553 rte_eth_macaddr_get(pi, &mac_addr);
1554 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1555 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1556 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1557 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
/* at least one port started, need to check link status */
1560 need_check_link_status = 1;
1563 if (need_check_link_status == 1 && !no_link_check)
1564 check_all_ports_link_status(RTE_PORT_ALL);
1565 else if (need_check_link_status == 0)
1566 printf("Please stop the ports first\n");
1573 stop_port(portid_t pid)
1576 struct rte_port *port;
1577 int need_check_link_status = 0;
1584 if (port_id_is_invalid(pid, ENABLED_WARN))
1587 printf("Stopping ports...\n");
1589 RTE_ETH_FOREACH_DEV(pi) {
1590 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1593 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1594 printf("Please remove port %d from forwarding configuration.\n", pi);
1598 if (port_is_bonding_slave(pi)) {
1599 printf("Please remove port %d from bonded device.\n", pi);
1604 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1605 RTE_PORT_HANDLING) == 0)
1608 rte_eth_dev_stop(pi);
1610 if (rte_atomic16_cmpset(&(port->port_status),
1611 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1612 printf("Port %d can not be set into stopped\n", pi);
1613 need_check_link_status = 1;
1615 if (need_check_link_status && !no_link_check)
1616 check_all_ports_link_status(RTE_PORT_ALL);
1622 close_port(portid_t pid)
1625 struct rte_port *port;
1627 if (port_id_is_invalid(pid, ENABLED_WARN))
1630 printf("Closing ports...\n");
1632 RTE_ETH_FOREACH_DEV(pi) {
1633 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1636 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1637 printf("Please remove port %d from forwarding configuration.\n", pi);
1641 if (port_is_bonding_slave(pi)) {
1642 printf("Please remove port %d from bonded device.\n", pi);
1647 if (rte_atomic16_cmpset(&(port->port_status),
1648 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1649 printf("Port %d is already closed\n", pi);
1653 if (rte_atomic16_cmpset(&(port->port_status),
1654 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1655 printf("Port %d is now not stopped\n", pi);
1659 if (port->flow_list)
1660 port_flow_flush(pi);
1661 rte_eth_dev_close(pi);
1663 if (rte_atomic16_cmpset(&(port->port_status),
1664 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1665 printf("Port %d cannot be set to closed\n", pi);
1672 attach_port(char *identifier)
1675 unsigned int socket_id;
1677 printf("Attaching a new port...\n");
1679 if (identifier == NULL) {
1680 printf("Invalid parameters are specified\n");
1684 if (rte_eth_dev_attach(identifier, &pi))
1687 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1688 /* if socket_id is invalid, set to 0 */
1689 if (check_socket_id(socket_id) < 0)
1691 reconfig(pi, socket_id);
1692 rte_eth_promiscuous_enable(pi);
1694 nb_ports = rte_eth_dev_count();
1696 ports[pi].port_status = RTE_PORT_STOPPED;
1698 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1703 detach_port(uint8_t port_id)
1705 char name[RTE_ETH_NAME_MAX_LEN];
1707 printf("Detaching a port...\n");
1709 if (!port_is_closed(port_id)) {
1710 printf("Please close port first\n");
1714 if (ports[port_id].flow_list)
1715 port_flow_flush(port_id);
1717 if (rte_eth_dev_detach(port_id, name))
1720 nb_ports = rte_eth_dev_count();
1722 printf("Port '%s' is detached. Now total ports is %d\n",
1734 stop_packet_forwarding();
1736 if (ports != NULL) {
1738 RTE_ETH_FOREACH_DEV(pt_id) {
1739 printf("\nShutting down port %d...\n", pt_id);
1745 printf("\nBye...\n");
1748 typedef void (*cmd_func_t)(void);
1749 struct pmd_test_command {
1750 const char *cmd_name;
1751 cmd_func_t cmd_func;
1754 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports, waiting up to 9 s, and print the final status. */
1758 check_all_ports_link_status(uint32_t port_mask)
1760 #define CHECK_INTERVAL 100 /* 100ms */
1761 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1762 uint8_t portid, count, all_ports_up, print_flag = 0;
1763 struct rte_eth_link link;
1765 printf("Checking link statuses...\n");
1767 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1769 RTE_ETH_FOREACH_DEV(portid) {
1770 if ((port_mask & (1 << portid)) == 0)
1772 memset(&link, 0, sizeof(link));
1773 rte_eth_link_get_nowait(portid, &link);
1774 /* print link status if flag set */
1775 if (print_flag == 1) {
1776 if (link.link_status)
1777 printf("Port %d Link Up - speed %u "
1778 "Mbps - %s\n", (uint8_t)portid,
1779 (unsigned)link.link_speed,
1780 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1781 ("full-duplex") : ("half-duplex\n"));
1783 printf("Port %d Link Down\n",
1787 /* clear all_ports_up flag if any link down */
1788 if (link.link_status == ETH_LINK_DOWN) {
1793 /* after finally printing all link status, get out */
1794 if (print_flag == 1)
1797 if (all_ports_up == 0) {
1799 rte_delay_ms(CHECK_INTERVAL);
1802 /* set the print_flag if all ports up or timeout */
1803 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1813 rmv_event_callback(void *arg)
1815 struct rte_eth_dev *dev;
1816 uint8_t port_id = (intptr_t)arg;
1818 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1819 dev = &rte_eth_devices[port_id];
1822 close_port(port_id);
1823 printf("removing device %s\n", dev->device->name);
1824 rte_eal_dev_detach(dev->device);
1825 dev->state = RTE_ETH_DEV_UNUSED;
1828 /* This function is used by the interrupt thread */
1830 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
1833 static const char * const event_desc[] = {
1834 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1835 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1836 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1837 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1838 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1839 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1840 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1841 [RTE_ETH_EVENT_MAX] = NULL,
1844 RTE_SET_USED(param);
1845 RTE_SET_USED(ret_param);
1847 if (type >= RTE_ETH_EVENT_MAX) {
1848 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1849 port_id, __func__, type);
1851 } else if (event_print_mask & (UINT32_C(1) << type)) {
1852 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1858 case RTE_ETH_EVENT_INTR_RMV:
1859 if (rte_eal_alarm_set(100000,
1860 rmv_event_callback, (void *)(intptr_t)port_id))
1861 fprintf(stderr, "Could not set up deferred device removal\n");
1870 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1874 uint8_t mapping_found = 0;
1876 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1877 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1878 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1879 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1880 tx_queue_stats_mappings[i].queue_id,
1881 tx_queue_stats_mappings[i].stats_counter_id);
1888 port->tx_queue_stats_mapping_enabled = 1;
1893 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1897 uint8_t mapping_found = 0;
1899 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1900 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1901 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1902 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1903 rx_queue_stats_mappings[i].queue_id,
1904 rx_queue_stats_mappings[i].stats_counter_id);
1911 port->rx_queue_stats_mapping_enabled = 1;
1916 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1920 diag = set_tx_queue_stats_mapping_registers(pi, port);
1922 if (diag == -ENOTSUP) {
1923 port->tx_queue_stats_mapping_enabled = 0;
1924 printf("TX queue stats mapping not supported port id=%d\n", pi);
1927 rte_exit(EXIT_FAILURE,
1928 "set_tx_queue_stats_mapping_registers "
1929 "failed for port id=%d diag=%d\n",
1933 diag = set_rx_queue_stats_mapping_registers(pi, port);
1935 if (diag == -ENOTSUP) {
1936 port->rx_queue_stats_mapping_enabled = 0;
1937 printf("RX queue stats mapping not supported port id=%d\n", pi);
1940 rte_exit(EXIT_FAILURE,
1941 "set_rx_queue_stats_mapping_registers "
1942 "failed for port id=%d diag=%d\n",
1948 rxtx_port_config(struct rte_port *port)
1950 port->rx_conf = port->dev_info.default_rxconf;
1951 port->tx_conf = port->dev_info.default_txconf;
1953 /* Check if any RX/TX parameters have been passed */
1954 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1955 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1957 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1958 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1960 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1961 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1963 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1964 port->rx_conf.rx_free_thresh = rx_free_thresh;
1966 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1967 port->rx_conf.rx_drop_en = rx_drop_en;
1969 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1970 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1972 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1973 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1975 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1976 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1978 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1979 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1981 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1982 port->tx_conf.tx_free_thresh = tx_free_thresh;
1984 if (txq_flags != RTE_PMD_PARAM_UNSET)
1985 port->tx_conf.txq_flags = txq_flags;
1989 init_port_config(void)
1992 struct rte_port *port;
1994 RTE_ETH_FOREACH_DEV(pid) {
1996 port->dev_conf.rxmode = rx_mode;
1997 port->dev_conf.fdir_conf = fdir_conf;
1999 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2000 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2002 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2003 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2006 if (port->dcb_flag == 0) {
2007 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2008 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2010 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2013 rxtx_port_config(port);
2015 rte_eth_macaddr_get(pid, &port->eth_addr);
2017 map_port_queue_stats_mapping_registers(pid, port);
2018 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2019 rte_pmd_ixgbe_bypass_init(pid);
2022 if (lsc_interrupt &&
2023 (rte_eth_devices[pid].data->dev_flags &
2024 RTE_ETH_DEV_INTR_LSC))
2025 port->dev_conf.intr_conf.lsc = 1;
2026 if (rmv_interrupt &&
2027 (rte_eth_devices[pid].data->dev_flags &
2028 RTE_ETH_DEV_INTR_RMV))
2029 port->dev_conf.intr_conf.rmv = 1;
2033 void set_port_slave_flag(portid_t slave_pid)
2035 struct rte_port *port;
2037 port = &ports[slave_pid];
2038 port->slave_flag = 1;
2041 void clear_port_slave_flag(portid_t slave_pid)
2043 struct rte_port *port;
2045 port = &ports[slave_pid];
2046 port->slave_flag = 0;
2049 uint8_t port_is_bonding_slave(portid_t slave_pid)
2051 struct rte_port *port;
2053 port = &ports[slave_pid];
2054 return port->slave_flag;
2057 const uint16_t vlan_tags[] = {
2058 0, 1, 2, 3, 4, 5, 6, 7,
2059 8, 9, 10, 11, 12, 13, 14, 15,
2060 16, 17, 18, 19, 20, 21, 22, 23,
2061 24, 25, 26, 27, 28, 29, 30, 31
2065 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2066 enum dcb_mode_enable dcb_mode,
2067 enum rte_eth_nb_tcs num_tcs,
2073 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2074 * given above, and the number of traffic classes available for use.
2076 if (dcb_mode == DCB_VT_ENABLED) {
2077 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2078 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2079 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2080 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2082 /* VMDQ+DCB RX and TX configurations */
2083 vmdq_rx_conf->enable_default_pool = 0;
2084 vmdq_rx_conf->default_pool = 0;
2085 vmdq_rx_conf->nb_queue_pools =
2086 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2087 vmdq_tx_conf->nb_queue_pools =
2088 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2090 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2091 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2092 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2093 vmdq_rx_conf->pool_map[i].pools =
2094 1 << (i % vmdq_rx_conf->nb_queue_pools);
2096 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2097 vmdq_rx_conf->dcb_tc[i] = i;
2098 vmdq_tx_conf->dcb_tc[i] = i;
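/*
 * Worked example (editor's note): with num_tcs == ETH_4_TCS there are 32
 * queue pools, so each of the 32 VLAN tags above is mapped to its own pool
 * (pool_map[i].pools == 1 << i); with ETH_8_TCS there are only 16 pools and
 * VLAN tags 0 and 16, 1 and 17, ... share a pool.
 */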
2101 /* set DCB mode of RX and TX of multiple queues */
2102 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2103 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2105 struct rte_eth_dcb_rx_conf *rx_conf =
2106 ð_conf->rx_adv_conf.dcb_rx_conf;
2107 struct rte_eth_dcb_tx_conf *tx_conf =
2108 ð_conf->tx_adv_conf.dcb_tx_conf;
2110 rx_conf->nb_tcs = num_tcs;
2111 tx_conf->nb_tcs = num_tcs;
2113 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2114 rx_conf->dcb_tc[i] = i % num_tcs;
2115 tx_conf->dcb_tc[i] = i % num_tcs;
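/*
 * Worked example (editor's note): with num_tcs == ETH_4_TCS the eight user
 * priorities map to traffic classes 0,1,2,3,0,1,2,3; with ETH_8_TCS the
 * mapping is one-to-one.
 */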
2117 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2118 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2119 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2123 eth_conf->dcb_capability_en =
2124 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2126 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2132 init_port_dcb_config(portid_t pid,
2133 enum dcb_mode_enable dcb_mode,
2134 enum rte_eth_nb_tcs num_tcs,
2137 struct rte_eth_conf port_conf;
2138 struct rte_port *rte_port;
2142 rte_port = &ports[pid];
2144 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2145 /* Enter DCB configuration status */
/* Set configuration of DCB in VT mode and DCB in non-VT mode. */
2149 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2152 port_conf.rxmode.hw_vlan_filter = 1;
2155 * Write the configuration into the device.
2156 * Set the numbers of RX & TX queues to 0, so
2157 * the RX & TX queues will not be setup.
2159 (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2161 rte_eth_dev_info_get(pid, &rte_port->dev_info);
/* If dev_info.vmdq_pool_base is greater than 0,
 * the queue ids of the vmdq pools start after the pf queues.
2166 if (dcb_mode == DCB_VT_ENABLED &&
2167 rte_port->dev_info.vmdq_pool_base > 0) {
2168 printf("VMDQ_DCB multi-queue mode is nonsensical"
2169 " for port %d.", pid);
/* Assume the ports in testpmd have the same DCB capability
 * and the same number of rxq and txq in DCB mode.
2176 if (dcb_mode == DCB_VT_ENABLED) {
2177 if (rte_port->dev_info.max_vfs > 0) {
2178 nb_rxq = rte_port->dev_info.nb_rx_queues;
2179 nb_txq = rte_port->dev_info.nb_tx_queues;
2181 nb_rxq = rte_port->dev_info.max_rx_queues;
2182 nb_txq = rte_port->dev_info.max_tx_queues;
/* if VT is disabled, use all pf queues */
2186 if (rte_port->dev_info.vmdq_pool_base == 0) {
2187 nb_rxq = rte_port->dev_info.max_rx_queues;
2188 nb_txq = rte_port->dev_info.max_tx_queues;
2190 nb_rxq = (queueid_t)num_tcs;
2191 nb_txq = (queueid_t)num_tcs;
2195 rx_free_thresh = 64;
2197 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2199 rxtx_port_config(rte_port);
2201 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2202 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2203 rx_vft_set(pid, vlan_tags[i], 1);
2205 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2206 map_port_queue_stats_mapping_registers(pid, rte_port);
2208 rte_port->dcb_flag = 1;
2216 /* Configuration of Ethernet ports. */
2217 ports = rte_zmalloc("testpmd: ports",
2218 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2219 RTE_CACHE_LINE_SIZE);
2220 if (ports == NULL) {
2221 rte_exit(EXIT_FAILURE,
2222 "rte_zmalloc(%d struct rte_port) failed\n",
2238 const char clr[] = { 27, '[', '2', 'J', '\0' };
2239 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2241 /* Clear screen and move to top left */
2242 printf("%s%s", clr, top_left);
2244 printf("\nPort statistics ====================================");
2245 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2246 nic_stats_display(fwd_ports_ids[i]);
2250 signal_handler(int signum)
2252 if (signum == SIGINT || signum == SIGTERM) {
2253 printf("\nSignal %d received, preparing to exit...\n",
2255 #ifdef RTE_LIBRTE_PDUMP
2256 /* uninitialize packet capture framework */
2259 #ifdef RTE_LIBRTE_LATENCY_STATS
2260 rte_latencystats_uninit();
2263 /* exit with the expected status */
2264 signal(signum, SIG_DFL);
2265 kill(getpid(), signum);
2270 main(int argc, char** argv)
2275 signal(SIGINT, signal_handler);
2276 signal(SIGTERM, signal_handler);
2278 diag = rte_eal_init(argc, argv);
2280 rte_panic("Cannot init EAL\n");
2282 #ifdef RTE_LIBRTE_PDUMP
2283 /* initialize packet capture framework */
2284 rte_pdump_init(NULL);
2287 nb_ports = (portid_t) rte_eth_dev_count();
2289 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2291 /* allocate port structures, and init them */
2294 set_def_fwd_config();
2296 rte_panic("Empty set of forwarding logical cores - check the "
2297 "core mask supplied in the command parameters\n");
2299 /* Bitrate/latency stats disabled by default */
2300 #ifdef RTE_LIBRTE_BITRATE
2301 bitrate_enabled = 0;
2303 #ifdef RTE_LIBRTE_LATENCY_STATS
2304 latencystats_enabled = 0;
2310 launch_args_parse(argc, argv);
2312 if (tx_first && interactive)
rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
"interactive mode.\n");
2315 if (!nb_rxq && !nb_txq)
2316 printf("Warning: Either rx or tx queues should be non-zero\n");
2318 if (nb_rxq > 1 && nb_rxq > nb_txq)
2319 printf("Warning: nb_rxq=%d enables RSS configuration, "
2320 "but nb_txq=%d will prevent to fully test it.\n",
2324 if (start_port(RTE_PORT_ALL) != 0)
2325 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2327 /* set all ports to promiscuous mode by default */
2328 RTE_ETH_FOREACH_DEV(port_id)
2329 rte_eth_promiscuous_enable(port_id);
2331 /* Init metrics library */
2332 rte_metrics_init(rte_socket_id());
2334 #ifdef RTE_LIBRTE_LATENCY_STATS
2335 if (latencystats_enabled != 0) {
2336 int ret = rte_latencystats_init(1, NULL);
2338 printf("Warning: latencystats init()"
2339 " returned error %d\n", ret);
2340 printf("Latencystats running on lcore %d\n",
2341 latencystats_lcore_id);
2345 /* Setup bitrate stats */
2346 #ifdef RTE_LIBRTE_BITRATE
2347 if (bitrate_enabled != 0) {
2348 bitrate_data = rte_stats_bitrate_create();
2349 if (bitrate_data == NULL)
2350 rte_exit(EXIT_FAILURE,
2351 "Could not allocate bitrate data.\n");
2352 rte_stats_bitrate_reg(bitrate_data);
2356 #ifdef RTE_LIBRTE_CMDLINE
2357 if (strlen(cmdline_filename) != 0)
2358 cmdline_read_from_file(cmdline_filename);
2360 if (interactive == 1) {
2362 printf("Start automatic packet forwarding\n");
2363 start_packet_forwarding(0);
2373 printf("No commandline core given, start packet forwarding\n");
2374 start_packet_forwarding(tx_first);
2375 if (stats_period != 0) {
2376 uint64_t prev_time = 0, cur_time, diff_time = 0;
2377 uint64_t timer_period;
2379 /* Convert to number of cycles */
2380 timer_period = stats_period * rte_get_timer_hz();
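/*
 * Worked example (editor's note, assuming stats_period comes from the
 * --stats-period option): with a period of 5 seconds on a machine whose
 * timer runs at 2.5 GHz, timer_period = 5 * 2500000000 = 12.5e9 cycles, so
 * the statistics display is refreshed roughly every five seconds.
 */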
2383 cur_time = rte_get_timer_cycles();
2384 diff_time += cur_time - prev_time;
2386 if (diff_time >= timer_period) {
2388 /* Reset the timer */
2391 /* Sleep to avoid unnecessary checks */
2392 prev_time = cur_time;
2397 printf("Press enter to exit\n");
2398 rc = read(0, &c, 1);