4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <sys/types.h>
45 #include <sys/queue.h>
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_memzone.h>
61 #include <rte_launch.h>
63 #include <rte_alarm.h>
64 #include <rte_per_lcore.h>
65 #include <rte_lcore.h>
66 #include <rte_atomic.h>
67 #include <rte_branch_prediction.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
71 #include <rte_interrupts.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
76 #include <rte_string_fns.h>
77 #ifdef RTE_LIBRTE_IXGBE_PMD
78 #include <rte_pmd_ixgbe.h>
80 #ifdef RTE_LIBRTE_PDUMP
81 #include <rte_pdump.h>
84 #include <rte_metrics.h>
85 #ifdef RTE_LIBRTE_BITRATE
86 #include <rte_bitrate.h>
88 #ifdef RTE_LIBRTE_LATENCY_STATS
89 #include <rte_latencystats.h>
94 uint16_t verbose_level = 0; /**< Silent by default. */
96 /* Use the master core for the command line? */
97 uint8_t interactive = 0;
98 uint8_t auto_start = 0;
100 char cmdline_filename[PATH_MAX] = {0};
103 * NUMA support configuration.
104 * When set, the NUMA support attempts to dispatch the allocation of the
105 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
106 * probed ports among the CPU sockets 0 and 1.
107 * Otherwise, all memory is allocated from CPU socket 0.
109 uint8_t numa_support = 1; /**< numa enabled by default */
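/*
 * Illustrative example: with numa_support left at its default of 1, a port
 * whose device sits on CPU socket 1 gets its RX/TX rings and mbuf pool
 * allocated from socket 1 memory; clearing it forces every allocation onto
 * CPU socket 0.
 */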
112 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
115 uint8_t socket_num = UMA_NO_CONFIG;
118 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
123 * Record the Ethernet address of peer target ports to which packets are
125 * Must be instantiated with the ethernet addresses of peer traffic generator
128 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
129 portid_t nb_peer_eth_addrs = 0;
132 * Probed Target Environment.
134 struct rte_port *ports; /**< For all probed ethernet ports. */
135 portid_t nb_ports; /**< Number of probed ethernet ports. */
136 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
137 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
140 * Test Forwarding Configuration.
141 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
142 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
144 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
145 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
146 portid_t nb_cfg_ports; /**< Number of configured ports. */
147 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
149 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
150 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
152 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
153 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
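/*
 * Worked example of the invariants above (values chosen for illustration):
 * with 2 probed ports, nb_rxq = 4 and 4 enabled forwarding lcores,
 * nb_fwd_streams = 2 * 4 = 8 and nb_fwd_lcores may range from 1 to 4,
 * never exceeding nb_cfg_lcores or nb_lcores.
 */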
156 * Forwarding engines.
158 struct fwd_engine * fwd_engines[] = {
167 #ifdef RTE_LIBRTE_IEEE1588
168 &ieee1588_fwd_engine,
173 struct fwd_config cur_fwd_config;
174 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
175 uint32_t retry_enabled;
176 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
177 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
179 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
180 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
181 * specified on command-line. */
182 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
184 * Configuration of packet segments used by the "txonly" processing engine.
186 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
187 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
188 TXONLY_DEF_PACKET_LEN,
190 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
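/*
 * Illustrative multi-segment setup (hypothetical values): to split a
 * 64-byte TXONLY packet across two chained mbufs, one could set
 *     tx_pkt_seg_lengths[0] = 42, tx_pkt_seg_lengths[1] = 22,
 *     tx_pkt_nb_segs = 2,
 * keeping the sum of the segment lengths equal to tx_pkt_length.
 */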
192 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
193 /**< Split policy for packets to TX. */
195 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
196 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
198 /* Whether the current configuration uses DCB; 0 means it is not in DCB mode */
199 uint8_t dcb_config = 0;
201 /* Whether the dcb is in testing status */
202 uint8_t dcb_test = 0;
205 * Configurable number of RX/TX queues.
207 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
208 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
211 * Configurable number of RX/TX ring descriptors.
213 #define RTE_TEST_RX_DESC_DEFAULT 128
214 #define RTE_TEST_TX_DESC_DEFAULT 512
215 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
216 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
218 #define RTE_PMD_PARAM_UNSET -1
220 * Configurable values of RX and TX ring threshold registers.
223 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
224 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
225 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
227 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
228 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
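/*
 * Note: each threshold stays at RTE_PMD_PARAM_UNSET unless overridden on the
 * command line; rxtx_port_config() below copies a value into
 * port->rx_conf/port->tx_conf only when it differs from RTE_PMD_PARAM_UNSET,
 * so the PMD defaults reported by rte_eth_dev_info_get() apply otherwise.
 */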
232 * Configurable value of RX free threshold.
234 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
237 * Configurable value of RX drop enable.
239 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
242 * Configurable value of TX free threshold.
244 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
247 * Configurable value of TX RS bit threshold.
249 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
252 * Configurable value of TX queue flags.
254 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
257 * Receive Side Scaling (RSS) configuration.
259 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
262 * Port topology configuration
264 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
267 * Avoid flushing all the RX streams before starting forwarding.
269 uint8_t no_flush_rx = 0; /* flush by default */
272 * Flow API isolated mode.
274 uint8_t flow_isolate_all;
277 * Avoid checking the link status when starting/stopping a port.
279 uint8_t no_link_check = 0; /* check by default */
282 * Enable link status change notification
284 uint8_t lsc_interrupt = 1; /* enabled by default */
287 * Enable device removal notification.
289 uint8_t rmv_interrupt = 1; /* enabled by default */
292 * Display or mask Ethernet events.
293 * Defaults to all events except VF_MBOX.
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
300 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
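/*
 * Example of how the mask is consumed (see eth_event_callback() below): the
 * RTE_ETH_EVENT_INTR_LSC bit is set, so link-state-change events are printed,
 * while the RTE_ETH_EVENT_VF_MBOX bit is left clear and those events stay
 * silent.
 */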
303 * NIC bypass mode configuration options.
306 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
307 /* The NIC bypass watchdog timeout. */
308 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
312 #ifdef RTE_LIBRTE_LATENCY_STATS
315 * Set when latency stats are enabled on the command line.
317 uint8_t latencystats_enabled;
320 * Lcore ID to serve latency statistics.
322 lcoreid_t latencystats_lcore_id = -1;
327 * Ethernet device configuration.
329 struct rte_eth_rxmode rx_mode = {
330 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
332 .header_split = 0, /**< Header Split disabled. */
333 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
334 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
335 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
336 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
337 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
338 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
341 struct rte_fdir_conf fdir_conf = {
342 .mode = RTE_FDIR_MODE_NONE,
343 .pballoc = RTE_FDIR_PBALLOC_64K,
344 .status = RTE_FDIR_REPORT_STATUS,
346 .vlan_tci_mask = 0x0,
348 .src_ip = 0xFFFFFFFF,
349 .dst_ip = 0xFFFFFFFF,
352 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
353 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
355 .src_port_mask = 0xFFFF,
356 .dst_port_mask = 0xFFFF,
357 .mac_addr_byte_mask = 0xFF,
358 .tunnel_type_mask = 1,
359 .tunnel_id_mask = 0xFFFFFFFF,
364 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
366 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
367 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
369 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
370 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
372 uint16_t nb_tx_queue_stats_mappings = 0;
373 uint16_t nb_rx_queue_stats_mappings = 0;
375 unsigned int num_sockets = 0;
376 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
378 #ifdef RTE_LIBRTE_BITRATE
379 /* Bitrate statistics */
380 struct rte_stats_bitrates *bitrate_data;
381 lcoreid_t bitrate_lcore_id;
382 uint8_t bitrate_enabled;
385 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
386 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
388 /* Forward function declarations */
389 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
390 static void check_all_ports_link_status(uint32_t port_mask);
391 static int eth_event_callback(portid_t port_id,
392 enum rte_eth_event_type type,
393 void *param, void *ret_param);
396 * Check if all the ports are started.
397 * If yes, return positive value. If not, return zero.
399 static int all_ports_started(void);
402 * Helper function to check whether a socket id has not been seen yet.
403 * If it is new, return a positive value; if already discovered, return zero.
406 new_socket_id(unsigned int socket_id)
410 for (i = 0; i < num_sockets; i++) {
411 if (socket_ids[i] == socket_id)
418 * Setup default configuration.
421 set_default_fwd_lcores_config(void)
425 unsigned int sock_num;
428 for (i = 0; i < RTE_MAX_LCORE; i++) {
429 sock_num = rte_lcore_to_socket_id(i);
430 if (new_socket_id(sock_num)) {
431 if (num_sockets >= RTE_MAX_NUMA_NODES) {
432 rte_exit(EXIT_FAILURE,
433 "Total sockets greater than %u\n",
436 socket_ids[num_sockets++] = sock_num;
438 if (!rte_lcore_is_enabled(i))
440 if (i == rte_get_master_lcore())
442 fwd_lcores_cpuids[nb_lc++] = i;
444 nb_lcores = (lcoreid_t) nb_lc;
445 nb_cfg_lcores = nb_lcores;
450 set_def_peer_eth_addrs(void)
454 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
455 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
456 peer_eth_addrs[i].addr_bytes[5] = i;
461 set_default_fwd_ports_config(void)
465 for (pt_id = 0; pt_id < nb_ports; pt_id++)
466 fwd_ports_ids[pt_id] = pt_id;
468 nb_cfg_ports = nb_ports;
469 nb_fwd_ports = nb_ports;
473 set_def_fwd_config(void)
475 set_default_fwd_lcores_config();
476 set_def_peer_eth_addrs();
477 set_default_fwd_ports_config();
481 * Configuration initialisation done once at init time.
484 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
485 unsigned int socket_id)
487 char pool_name[RTE_MEMPOOL_NAMESIZE];
488 struct rte_mempool *rte_mp = NULL;
491 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
492 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
495 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
496 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
499 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
500 mb_size, (unsigned) mb_mempool_cache,
501 sizeof(struct rte_pktmbuf_pool_private),
506 if (rte_mempool_populate_anon(rte_mp) == 0) {
507 rte_mempool_free(rte_mp);
511 rte_pktmbuf_pool_init(rte_mp, NULL);
512 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
514 /* wrapper to rte_mempool_create() */
515 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
516 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
520 if (rte_mp == NULL) {
521 rte_exit(EXIT_FAILURE,
522 "Creation of mbuf pool for socket %u failed: %s\n",
523 socket_id, rte_strerror(rte_errno));
524 } else if (verbose_level > 0) {
525 rte_mempool_dump(stdout, rte_mp);
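/*
 * Typical call sites appear in the initialisation code below, e.g. (sketch):
 *     mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, socket_ids[i]);
 * one pool per CPU socket when NUMA support is on, or a single pool on
 * socket 0 otherwise.
 */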
530 * Check whether the given socket id is valid in NUMA mode;
531 * return 0 if valid, -1 otherwise.
534 check_socket_id(const unsigned int socket_id)
536 static int warning_once = 0;
538 if (new_socket_id(socket_id)) {
539 if (!warning_once && numa_support)
540 printf("Warning: NUMA should be configured manually by"
541 " using --port-numa-config and"
542 " --ring-numa-config parameters along with"
554 struct rte_port *port;
555 struct rte_mempool *mbp;
556 unsigned int nb_mbuf_per_pool;
558 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
559 struct rte_gro_param gro_param;
561 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
564 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
565 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
566 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
569 /* Configuration of logical cores. */
570 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
571 sizeof(struct fwd_lcore *) * nb_lcores,
572 RTE_CACHE_LINE_SIZE);
573 if (fwd_lcores == NULL) {
574 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
575 "failed\n", nb_lcores);
577 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
578 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
579 sizeof(struct fwd_lcore),
580 RTE_CACHE_LINE_SIZE);
581 if (fwd_lcores[lc_id] == NULL) {
582 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
585 fwd_lcores[lc_id]->cpuid_idx = lc_id;
588 RTE_ETH_FOREACH_DEV(pid) {
590 rte_eth_dev_info_get(pid, &port->dev_info);
593 if (port_numa[pid] != NUMA_NO_CONFIG)
594 port_per_socket[port_numa[pid]]++;
596 uint32_t socket_id = rte_eth_dev_socket_id(pid);
598 /* if socket_id is invalid, set to 0 */
599 if (check_socket_id(socket_id) < 0)
601 port_per_socket[socket_id]++;
605 /* set flag to initialize port/queue */
606 port->need_reconfig = 1;
607 port->need_reconfig_queues = 1;
611 * Create pools of mbuf.
612 * If NUMA support is disabled, create a single pool of mbuf in
613 * socket 0 memory by default.
614 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
616 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
617 * nb_txd can be configured at run time.
619 if (param_total_num_mbufs)
620 nb_mbuf_per_pool = param_total_num_mbufs;
622 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
623 (nb_lcores * mb_mempool_cache) +
624 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
625 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
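/*
 * Rough sizing example (descriptor maxima assumed to be 2048 each): per port
 * this budgets 2048 RX + 2048 TX descriptors, one burst of MAX_PKT_BURST
 * packets and nb_lcores * mb_mempool_cache cached mbufs, then scales by
 * RTE_MAX_ETHPORTS when param_total_num_mbufs is not given.
 */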
631 for (i = 0; i < num_sockets; i++)
632 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
635 if (socket_num == UMA_NO_CONFIG)
636 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
638 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
645 * Record which mbuf pool each logical core should use, if needed.
647 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
648 mbp = mbuf_pool_find(
649 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
652 mbp = mbuf_pool_find(0);
653 fwd_lcores[lc_id]->mbp = mbp;
656 /* Configuration of packet forwarding streams. */
657 if (init_fwd_streams() < 0)
658 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
662 /* create a gro context for each lcore */
663 gro_param.gro_types = RTE_GRO_TCP_IPV4;
664 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
665 gro_param.max_item_per_flow = MAX_PKT_BURST;
666 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
667 gro_param.socket_id = rte_lcore_to_socket_id(
668 fwd_lcores_cpuids[lc_id]);
669 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
670 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
671 rte_exit(EXIT_FAILURE,
672 "rte_gro_ctx_create() failed\n");
679 reconfig(portid_t new_port_id, unsigned socket_id)
681 struct rte_port *port;
683 /* Reconfiguration of Ethernet ports. */
684 port = &ports[new_port_id];
685 rte_eth_dev_info_get(new_port_id, &port->dev_info);
687 /* set flag to initialize port/queue */
688 port->need_reconfig = 1;
689 port->need_reconfig_queues = 1;
690 port->socket_id = socket_id;
697 init_fwd_streams(void)
700 struct rte_port *port;
701 streamid_t sm_id, nb_fwd_streams_new;
704 /* set socket id according to numa or not */
705 RTE_ETH_FOREACH_DEV(pid) {
707 if (nb_rxq > port->dev_info.max_rx_queues) {
708 printf("Fail: nb_rxq(%d) is greater than "
709 "max_rx_queues(%d)\n", nb_rxq,
710 port->dev_info.max_rx_queues);
713 if (nb_txq > port->dev_info.max_tx_queues) {
714 printf("Fail: nb_txq(%d) is greater than "
715 "max_tx_queues(%d)\n", nb_txq,
716 port->dev_info.max_tx_queues);
720 if (port_numa[pid] != NUMA_NO_CONFIG)
721 port->socket_id = port_numa[pid];
723 port->socket_id = rte_eth_dev_socket_id(pid);
725 /* if socket_id is invalid, set to 0 */
726 if (check_socket_id(port->socket_id) < 0)
731 if (socket_num == UMA_NO_CONFIG)
734 port->socket_id = socket_num;
738 q = RTE_MAX(nb_rxq, nb_txq);
740 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
743 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
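/*
 * Example (assumed values): 2 ports with nb_rxq = 4 and nb_txq = 2 give
 * q = 4 and nb_fwd_streams_new = 2 * 4 = 8 forwarding streams.
 */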
744 if (nb_fwd_streams_new == nb_fwd_streams)
747 if (fwd_streams != NULL) {
748 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
749 if (fwd_streams[sm_id] == NULL)
751 rte_free(fwd_streams[sm_id]);
752 fwd_streams[sm_id] = NULL;
754 rte_free(fwd_streams);
759 nb_fwd_streams = nb_fwd_streams_new;
760 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
761 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
762 if (fwd_streams == NULL)
763 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
764 "failed\n", nb_fwd_streams);
766 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
767 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
768 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
769 if (fwd_streams[sm_id] == NULL)
770 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
777 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
779 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
781 unsigned int total_burst;
782 unsigned int nb_burst;
783 unsigned int burst_stats[3];
784 uint16_t pktnb_stats[3];
786 int burst_percent[3];
789 * First compute the total number of packet bursts and the
790 * two highest numbers of bursts of the same number of packets.
793 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
794 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
795 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
796 nb_burst = pbs->pkt_burst_spread[nb_pkt];
799 total_burst += nb_burst;
800 if (nb_burst > burst_stats[0]) {
801 burst_stats[1] = burst_stats[0];
802 pktnb_stats[1] = pktnb_stats[0];
803 burst_stats[0] = nb_burst;
804 pktnb_stats[0] = nb_pkt;
807 if (total_burst == 0)
809 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
810 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
811 burst_percent[0], (int) pktnb_stats[0]);
812 if (burst_stats[0] == total_burst) {
816 if (burst_stats[0] + burst_stats[1] == total_burst) {
817 printf(" + %d%% of %d pkts]\n",
818 100 - burst_percent[0], pktnb_stats[1]);
821 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
822 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
823 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
824 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
827 printf(" + %d%% of %d pkts + %d%% of others]\n",
828 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
830 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
833 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
835 struct rte_port *port;
838 static const char *fwd_stats_border = "----------------------";
840 port = &ports[port_id];
841 printf("\n %s Forward statistics for port %-2d %s\n",
842 fwd_stats_border, port_id, fwd_stats_border);
844 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
845 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
847 stats->ipackets, stats->imissed,
848 (uint64_t) (stats->ipackets + stats->imissed));
850 if (cur_fwd_eng == &csum_fwd_engine)
851 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
852 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
853 if ((stats->ierrors + stats->rx_nombuf) > 0) {
854 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
855 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
858 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
860 stats->opackets, port->tx_dropped,
861 (uint64_t) (stats->opackets + port->tx_dropped));
864 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
866 stats->ipackets, stats->imissed,
867 (uint64_t) (stats->ipackets + stats->imissed));
869 if (cur_fwd_eng == &csum_fwd_engine)
870 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
871 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
872 if ((stats->ierrors + stats->rx_nombuf) > 0) {
873 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
874 printf(" RX-nombufs: %14"PRIu64"\n",
878 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
880 stats->opackets, port->tx_dropped,
881 (uint64_t) (stats->opackets + port->tx_dropped));
884 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
886 pkt_burst_stats_display("RX",
887 &port->rx_stream->rx_burst_stats);
889 pkt_burst_stats_display("TX",
890 &port->tx_stream->tx_burst_stats);
893 if (port->rx_queue_stats_mapping_enabled) {
895 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
896 printf(" Stats reg %2d RX-packets:%14"PRIu64
897 " RX-errors:%14"PRIu64
898 " RX-bytes:%14"PRIu64"\n",
899 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
903 if (port->tx_queue_stats_mapping_enabled) {
904 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
905 printf(" Stats reg %2d TX-packets:%14"PRIu64
906 " TX-bytes:%14"PRIu64"\n",
907 i, stats->q_opackets[i], stats->q_obytes[i]);
911 printf(" %s--------------------------------%s\n",
912 fwd_stats_border, fwd_stats_border);
916 fwd_stream_stats_display(streamid_t stream_id)
918 struct fwd_stream *fs;
919 static const char *fwd_top_stats_border = "-------";
921 fs = fwd_streams[stream_id];
922 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
923 (fs->fwd_dropped == 0))
925 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
926 "TX Port=%2d/Queue=%2d %s\n",
927 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
928 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
929 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
930 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
932 /* if checksum mode */
933 if (cur_fwd_eng == &csum_fwd_engine) {
934 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
935 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
938 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
939 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
940 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
945 flush_fwd_rx_queues(void)
947 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
954 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
955 uint64_t timer_period;
957 /* convert to number of cycles */
958 timer_period = rte_get_timer_hz(); /* 1 second timeout */
960 for (j = 0; j < 2; j++) {
961 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
962 for (rxq = 0; rxq < nb_rxq; rxq++) {
963 port_id = fwd_ports_ids[rxp];
965 * testpmd can get stuck in the do-while loop below
966 * if rte_eth_rx_burst() always returns a nonzero
967 * number of packets, so a timer is added to exit
968 * this loop after the 1-second timer expires.
970 prev_tsc = rte_rdtsc();
972 nb_rx = rte_eth_rx_burst(port_id, rxq,
973 pkts_burst, MAX_PKT_BURST);
974 for (i = 0; i < nb_rx; i++)
975 rte_pktmbuf_free(pkts_burst[i]);
977 cur_tsc = rte_rdtsc();
978 diff_tsc = cur_tsc - prev_tsc;
979 timer_tsc += diff_tsc;
980 } while ((nb_rx > 0) &&
981 (timer_tsc < timer_period));
985 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
990 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
992 struct fwd_stream **fsm;
995 #ifdef RTE_LIBRTE_BITRATE
996 uint64_t tics_per_1sec;
998 uint64_t tics_current;
999 uint8_t idx_port, cnt_ports;
1001 cnt_ports = rte_eth_dev_count();
1002 tics_datum = rte_rdtsc();
1003 tics_per_1sec = rte_get_timer_hz();
1005 fsm = &fwd_streams[fc->stream_idx];
1006 nb_fs = fc->stream_nb;
1008 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1009 (*pkt_fwd)(fsm[sm_id]);
1010 #ifdef RTE_LIBRTE_BITRATE
1011 if (bitrate_enabled != 0 &&
1012 bitrate_lcore_id == rte_lcore_id()) {
1013 tics_current = rte_rdtsc();
1014 if (tics_current - tics_datum >= tics_per_1sec) {
1015 /* Periodic bitrate calculation */
1017 idx_port < cnt_ports;
1019 rte_stats_bitrate_calc(bitrate_data,
1021 tics_datum = tics_current;
1025 #ifdef RTE_LIBRTE_LATENCY_STATS
1026 if (latencystats_enabled != 0 &&
1027 latencystats_lcore_id == rte_lcore_id())
1028 rte_latencystats_update();
1031 } while (! fc->stopped);
1035 start_pkt_forward_on_core(void *fwd_arg)
1037 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1038 cur_fwd_config.fwd_eng->packet_fwd);
1043 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1044 * Used to start communication flows in network loopback test configurations.
1047 run_one_txonly_burst_on_core(void *fwd_arg)
1049 struct fwd_lcore *fwd_lc;
1050 struct fwd_lcore tmp_lcore;
1052 fwd_lc = (struct fwd_lcore *) fwd_arg;
1053 tmp_lcore = *fwd_lc;
1054 tmp_lcore.stopped = 1;
1055 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1060 * Launch packet forwarding:
1061 * - Setup per-port forwarding context.
1062 * - launch logical cores with their forwarding configuration.
1065 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1067 port_fwd_begin_t port_fwd_begin;
1072 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1073 if (port_fwd_begin != NULL) {
1074 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1075 (*port_fwd_begin)(fwd_ports_ids[i]);
1077 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1078 lc_id = fwd_lcores_cpuids[i];
1079 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1080 fwd_lcores[i]->stopped = 0;
1081 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1082 fwd_lcores[i], lc_id);
1084 printf("launch lcore %u failed - diag=%d\n",
1091 * Launch packet forwarding configuration.
1094 start_packet_forwarding(int with_tx_first)
1096 port_fwd_begin_t port_fwd_begin;
1097 port_fwd_end_t port_fwd_end;
1098 struct rte_port *port;
1103 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1104 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1106 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1107 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1109 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1110 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1111 (!nb_rxq || !nb_txq))
1112 rte_exit(EXIT_FAILURE,
1113 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1114 cur_fwd_eng->fwd_mode_name);
1116 if (all_ports_started() == 0) {
1117 printf("Not all ports were started\n");
1120 if (test_done == 0) {
1121 printf("Packet forwarding already started\n");
1125 if (init_fwd_streams() < 0) {
1126 printf("Fail from init_fwd_streams()\n");
1131 for (i = 0; i < nb_fwd_ports; i++) {
1132 pt_id = fwd_ports_ids[i];
1133 port = &ports[pt_id];
1134 if (!port->dcb_flag) {
1135 printf("In DCB mode, all forwarding ports must "
1136 "be configured in this mode.\n");
1140 if (nb_fwd_lcores == 1) {
1141 printf("In DCB mode, the number of forwarding cores "
1142 "should be larger than 1.\n");
1149 flush_fwd_rx_queues();
1152 pkt_fwd_config_display(&cur_fwd_config);
1153 rxtx_config_display();
1155 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1156 pt_id = fwd_ports_ids[i];
1157 port = &ports[pt_id];
1158 rte_eth_stats_get(pt_id, &port->stats);
1159 port->tx_dropped = 0;
1161 map_port_queue_stats_mapping_registers(pt_id, port);
1163 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1164 fwd_streams[sm_id]->rx_packets = 0;
1165 fwd_streams[sm_id]->tx_packets = 0;
1166 fwd_streams[sm_id]->fwd_dropped = 0;
1167 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1168 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1170 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1171 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1172 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1173 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1174 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1176 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1177 fwd_streams[sm_id]->core_cycles = 0;
1180 if (with_tx_first) {
1181 port_fwd_begin = tx_only_engine.port_fwd_begin;
1182 if (port_fwd_begin != NULL) {
1183 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1184 (*port_fwd_begin)(fwd_ports_ids[i]);
1186 while (with_tx_first--) {
1187 launch_packet_forwarding(
1188 run_one_txonly_burst_on_core);
1189 rte_eal_mp_wait_lcore();
1191 port_fwd_end = tx_only_engine.port_fwd_end;
1192 if (port_fwd_end != NULL) {
1193 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1194 (*port_fwd_end)(fwd_ports_ids[i]);
1197 launch_packet_forwarding(start_pkt_forward_on_core);
1201 stop_packet_forwarding(void)
1203 struct rte_eth_stats stats;
1204 struct rte_port *port;
1205 port_fwd_end_t port_fwd_end;
1210 uint64_t total_recv;
1211 uint64_t total_xmit;
1212 uint64_t total_rx_dropped;
1213 uint64_t total_tx_dropped;
1214 uint64_t total_rx_nombuf;
1215 uint64_t tx_dropped;
1216 uint64_t rx_bad_ip_csum;
1217 uint64_t rx_bad_l4_csum;
1218 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1219 uint64_t fwd_cycles;
1222 static const char *acc_stats_border = "+++++++++++++++";
1225 printf("Packet forwarding not started\n");
1228 printf("Telling cores to stop...");
1229 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1230 fwd_lcores[lc_id]->stopped = 1;
1231 printf("\nWaiting for lcores to finish...\n");
1232 rte_eal_mp_wait_lcore();
1233 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1234 if (port_fwd_end != NULL) {
1235 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1236 pt_id = fwd_ports_ids[i];
1237 (*port_fwd_end)(pt_id);
1240 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1243 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1244 if (cur_fwd_config.nb_fwd_streams >
1245 cur_fwd_config.nb_fwd_ports) {
1246 fwd_stream_stats_display(sm_id);
1247 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1248 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1250 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1252 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1255 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1256 tx_dropped = (uint64_t) (tx_dropped +
1257 fwd_streams[sm_id]->fwd_dropped);
1258 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1261 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1262 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1263 fwd_streams[sm_id]->rx_bad_ip_csum);
1264 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1268 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1269 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1270 fwd_streams[sm_id]->rx_bad_l4_csum);
1271 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1274 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1275 fwd_cycles = (uint64_t) (fwd_cycles +
1276 fwd_streams[sm_id]->core_cycles);
1281 total_rx_dropped = 0;
1282 total_tx_dropped = 0;
1283 total_rx_nombuf = 0;
1284 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1285 pt_id = fwd_ports_ids[i];
1287 port = &ports[pt_id];
1288 rte_eth_stats_get(pt_id, &stats);
1289 stats.ipackets -= port->stats.ipackets;
1290 port->stats.ipackets = 0;
1291 stats.opackets -= port->stats.opackets;
1292 port->stats.opackets = 0;
1293 stats.ibytes -= port->stats.ibytes;
1294 port->stats.ibytes = 0;
1295 stats.obytes -= port->stats.obytes;
1296 port->stats.obytes = 0;
1297 stats.imissed -= port->stats.imissed;
1298 port->stats.imissed = 0;
1299 stats.oerrors -= port->stats.oerrors;
1300 port->stats.oerrors = 0;
1301 stats.rx_nombuf -= port->stats.rx_nombuf;
1302 port->stats.rx_nombuf = 0;
1304 total_recv += stats.ipackets;
1305 total_xmit += stats.opackets;
1306 total_rx_dropped += stats.imissed;
1307 total_tx_dropped += port->tx_dropped;
1308 total_rx_nombuf += stats.rx_nombuf;
1310 fwd_port_stats_display(pt_id, &stats);
1313 printf("\n %s Accumulated forward statistics for all ports"
1315 acc_stats_border, acc_stats_border);
1316 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1318 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1320 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1321 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1322 if (total_rx_nombuf > 0)
1323 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1324 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1326 acc_stats_border, acc_stats_border);
1327 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1329 printf("\n CPU cycles/packet=%u (total cycles="
1330 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1331 (unsigned int)(fwd_cycles / total_recv),
1332 fwd_cycles, total_recv);
1334 printf("\nDone.\n");
1339 dev_set_link_up(portid_t pid)
1341 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1342 printf("\nSet link up fail.\n");
1346 dev_set_link_down(portid_t pid)
1348 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1349 printf("\nSet link down fail.\n");
1353 all_ports_started(void)
1356 struct rte_port *port;
1358 RTE_ETH_FOREACH_DEV(pi) {
1360 /* Check if there is a port which is not started */
1361 if ((port->port_status != RTE_PORT_STARTED) &&
1362 (port->slave_flag == 0))
1366 /* No port is left unstarted */
1371 all_ports_stopped(void)
1374 struct rte_port *port;
1376 RTE_ETH_FOREACH_DEV(pi) {
1378 if ((port->port_status != RTE_PORT_STOPPED) &&
1379 (port->slave_flag == 0))
1387 port_is_started(portid_t port_id)
1389 if (port_id_is_invalid(port_id, ENABLED_WARN))
1392 if (ports[port_id].port_status != RTE_PORT_STARTED)
1399 port_is_closed(portid_t port_id)
1401 if (port_id_is_invalid(port_id, ENABLED_WARN))
1404 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1411 start_port(portid_t pid)
1413 int diag, need_check_link_status = -1;
1416 struct rte_port *port;
1417 struct ether_addr mac_addr;
1418 enum rte_eth_event_type event_type;
1420 if (port_id_is_invalid(pid, ENABLED_WARN))
1425 RTE_ETH_FOREACH_DEV(pi) {
1426 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1429 need_check_link_status = 0;
1431 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1432 RTE_PORT_HANDLING) == 0) {
1433 printf("Port %d is now not stopped\n", pi);
1437 if (port->need_reconfig > 0) {
1438 port->need_reconfig = 0;
1440 if (flow_isolate_all) {
1441 int ret = port_flow_isolate(pi, 1);
1443 printf("Failed to apply isolated"
1444 " mode on port %d\n", pi);
1449 printf("Configuring Port %d (socket %u)\n", pi,
1451 /* configure port */
1452 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1455 if (rte_atomic16_cmpset(&(port->port_status),
1456 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1457 printf("Port %d can not be set back "
1458 "to stopped\n", pi);
1459 printf("Fail to configure port %d\n", pi);
1460 /* try to reconfigure port next time */
1461 port->need_reconfig = 1;
1465 if (port->need_reconfig_queues > 0) {
1466 port->need_reconfig_queues = 0;
1467 /* setup tx queues */
1468 for (qi = 0; qi < nb_txq; qi++) {
1469 if ((numa_support) &&
1470 (txring_numa[pi] != NUMA_NO_CONFIG))
1471 diag = rte_eth_tx_queue_setup(pi, qi,
1472 nb_txd,txring_numa[pi],
1475 diag = rte_eth_tx_queue_setup(pi, qi,
1476 nb_txd,port->socket_id,
1482 /* Fail to setup tx queue, return */
1483 if (rte_atomic16_cmpset(&(port->port_status),
1485 RTE_PORT_STOPPED) == 0)
1486 printf("Port %d can not be set back "
1487 "to stopped\n", pi);
1488 printf("Fail to configure port %d tx queues\n", pi);
1489 /* try to reconfigure queues next time */
1490 port->need_reconfig_queues = 1;
1493 /* setup rx queues */
1494 for (qi = 0; qi < nb_rxq; qi++) {
1495 if ((numa_support) &&
1496 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1497 struct rte_mempool * mp =
1498 mbuf_pool_find(rxring_numa[pi]);
1500 printf("Failed to setup RX queue: "
1501 "no mempool allocation"
1502 " on socket %d\n",
1507 diag = rte_eth_rx_queue_setup(pi, qi,
1508 nb_rxd,rxring_numa[pi],
1509 &(port->rx_conf),mp);
1511 struct rte_mempool *mp =
1512 mbuf_pool_find(port->socket_id);
1514 printf("Failed to setup RX queue: "
1515 "no mempool allocation"
1516 " on socket %d\n",
1520 diag = rte_eth_rx_queue_setup(pi, qi,
1521 nb_rxd,port->socket_id,
1522 &(port->rx_conf), mp);
1527 /* Fail to setup rx queue, return */
1528 if (rte_atomic16_cmpset(&(port->port_status),
1530 RTE_PORT_STOPPED) == 0)
1531 printf("Port %d can not be set back "
1532 "to stopped\n", pi);
1533 printf("Fail to configure port %d rx queues\n", pi);
1534 /* try to reconfigure queues next time */
1535 port->need_reconfig_queues = 1;
1540 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1541 event_type < RTE_ETH_EVENT_MAX;
1543 diag = rte_eth_dev_callback_register(pi,
1548 printf("Failed to setup event callback for event %d\n",
1555 if (rte_eth_dev_start(pi) < 0) {
1556 printf("Fail to start port %d\n", pi);
1558 /* Fail to setup rx queue, return */
1559 if (rte_atomic16_cmpset(&(port->port_status),
1560 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1561 printf("Port %d can not be set back to "
1566 if (rte_atomic16_cmpset(&(port->port_status),
1567 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1568 printf("Port %d can not be set into started\n", pi);
1570 rte_eth_macaddr_get(pi, &mac_addr);
1571 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1572 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1573 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1574 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1576 /* at least one port started, need to check link status */
1577 need_check_link_status = 1;
1580 if (need_check_link_status == 1 && !no_link_check)
1581 check_all_ports_link_status(RTE_PORT_ALL);
1582 else if (need_check_link_status == 0)
1583 printf("Please stop the ports first\n");
1590 stop_port(portid_t pid)
1593 struct rte_port *port;
1594 int need_check_link_status = 0;
1601 if (port_id_is_invalid(pid, ENABLED_WARN))
1604 printf("Stopping ports...\n");
1606 RTE_ETH_FOREACH_DEV(pi) {
1607 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1610 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1611 printf("Please remove port %d from forwarding configuration.\n", pi);
1615 if (port_is_bonding_slave(pi)) {
1616 printf("Please remove port %d from bonded device.\n", pi);
1621 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1622 RTE_PORT_HANDLING) == 0)
1625 rte_eth_dev_stop(pi);
1627 if (rte_atomic16_cmpset(&(port->port_status),
1628 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1629 printf("Port %d can not be set into stopped\n", pi);
1630 need_check_link_status = 1;
1632 if (need_check_link_status && !no_link_check)
1633 check_all_ports_link_status(RTE_PORT_ALL);
1639 close_port(portid_t pid)
1642 struct rte_port *port;
1644 if (port_id_is_invalid(pid, ENABLED_WARN))
1647 printf("Closing ports...\n");
1649 RTE_ETH_FOREACH_DEV(pi) {
1650 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1653 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1654 printf("Please remove port %d from forwarding configuration.\n", pi);
1658 if (port_is_bonding_slave(pi)) {
1659 printf("Please remove port %d from bonded device.\n", pi);
1664 if (rte_atomic16_cmpset(&(port->port_status),
1665 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1666 printf("Port %d is already closed\n", pi);
1670 if (rte_atomic16_cmpset(&(port->port_status),
1671 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1672 printf("Port %d is now not stopped\n", pi);
1676 if (port->flow_list)
1677 port_flow_flush(pi);
1678 rte_eth_dev_close(pi);
1680 if (rte_atomic16_cmpset(&(port->port_status),
1681 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1682 printf("Port %d cannot be set to closed\n", pi);
1689 reset_port(portid_t pid)
1693 struct rte_port *port;
1695 if (port_id_is_invalid(pid, ENABLED_WARN))
1698 printf("Resetting ports...\n");
1700 RTE_ETH_FOREACH_DEV(pi) {
1701 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1704 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1705 printf("Please remove port %d from forwarding "
1706 "configuration.\n", pi);
1710 if (port_is_bonding_slave(pi)) {
1711 printf("Please remove port %d from bonded device.\n",
1716 diag = rte_eth_dev_reset(pi);
1719 port->need_reconfig = 1;
1720 port->need_reconfig_queues = 1;
1722 printf("Failed to reset port %d. diag=%d\n", pi, diag);
1730 attach_port(char *identifier)
1733 unsigned int socket_id;
1735 printf("Attaching a new port...\n");
1737 if (identifier == NULL) {
1738 printf("Invalid parameters are specified\n");
1742 if (rte_eth_dev_attach(identifier, &pi))
1745 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1746 /* if socket_id is invalid, set to 0 */
1747 if (check_socket_id(socket_id) < 0)
1749 reconfig(pi, socket_id);
1750 rte_eth_promiscuous_enable(pi);
1752 nb_ports = rte_eth_dev_count();
1754 ports[pi].port_status = RTE_PORT_STOPPED;
1756 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1761 detach_port(uint8_t port_id)
1763 char name[RTE_ETH_NAME_MAX_LEN];
1765 printf("Detaching a port...\n");
1767 if (!port_is_closed(port_id)) {
1768 printf("Please close port first\n");
1772 if (ports[port_id].flow_list)
1773 port_flow_flush(port_id);
1775 if (rte_eth_dev_detach(port_id, name)) {
1776 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1780 nb_ports = rte_eth_dev_count();
1782 printf("Port '%s' is detached. Now total ports is %d\n",
1794 stop_packet_forwarding();
1796 if (ports != NULL) {
1798 RTE_ETH_FOREACH_DEV(pt_id) {
1799 printf("\nShutting down port %d...\n", pt_id);
1805 printf("\nBye...\n");
1808 typedef void (*cmd_func_t)(void);
1809 struct pmd_test_command {
1810 const char *cmd_name;
1811 cmd_func_t cmd_func;
1814 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1816 /* Check the link status of all ports in up to 9s, and print it at the end */
1818 check_all_ports_link_status(uint32_t port_mask)
1820 #define CHECK_INTERVAL 100 /* 100ms */
1821 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1823 uint8_t count, all_ports_up, print_flag = 0;
1824 struct rte_eth_link link;
1826 printf("Checking link statuses...\n");
1828 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1830 RTE_ETH_FOREACH_DEV(portid) {
1831 if ((port_mask & (1 << portid)) == 0)
1833 memset(&link, 0, sizeof(link));
1834 rte_eth_link_get_nowait(portid, &link);
1835 /* print link status if flag set */
1836 if (print_flag == 1) {
1837 if (link.link_status)
1839 "Port%d Link Up. speed %u Mbps- %s\n",
1840 portid, link.link_speed,
1841 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1842 ("full-duplex") : ("half-duplex\n"));
1844 printf("Port %d Link Down\n", portid);
1847 /* clear all_ports_up flag if any link down */
1848 if (link.link_status == ETH_LINK_DOWN) {
1853 /* after finally printing all link status, get out */
1854 if (print_flag == 1)
1857 if (all_ports_up == 0) {
1859 rte_delay_ms(CHECK_INTERVAL);
1862 /* set the print_flag if all ports up or timeout */
1863 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1873 rmv_event_callback(void *arg)
1875 struct rte_eth_dev *dev;
1876 uint8_t port_id = (intptr_t)arg;
1878 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1879 dev = &rte_eth_devices[port_id];
1882 close_port(port_id);
1883 printf("removing device %s\n", dev->device->name);
1884 if (rte_eal_dev_detach(dev->device))
1885 RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
1889 /* This function is used by the interrupt thread */
1891 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1894 static const char * const event_desc[] = {
1895 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1896 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1897 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1898 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1899 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1900 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1901 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1902 [RTE_ETH_EVENT_MAX] = NULL,
1905 RTE_SET_USED(param);
1906 RTE_SET_USED(ret_param);
1908 if (type >= RTE_ETH_EVENT_MAX) {
1909 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1910 port_id, __func__, type);
1912 } else if (event_print_mask & (UINT32_C(1) << type)) {
1913 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1919 case RTE_ETH_EVENT_INTR_RMV:
1920 if (rte_eal_alarm_set(100000,
1921 rmv_event_callback, (void *)(intptr_t)port_id))
1922 fprintf(stderr, "Could not set up deferred device removal\n");
1931 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1935 uint8_t mapping_found = 0;
1937 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1938 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1939 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1940 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1941 tx_queue_stats_mappings[i].queue_id,
1942 tx_queue_stats_mappings[i].stats_counter_id);
1949 port->tx_queue_stats_mapping_enabled = 1;
1954 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1958 uint8_t mapping_found = 0;
1960 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1961 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1962 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1963 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1964 rx_queue_stats_mappings[i].queue_id,
1965 rx_queue_stats_mappings[i].stats_counter_id);
1972 port->rx_queue_stats_mapping_enabled = 1;
1977 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1981 diag = set_tx_queue_stats_mapping_registers(pi, port);
1983 if (diag == -ENOTSUP) {
1984 port->tx_queue_stats_mapping_enabled = 0;
1985 printf("TX queue stats mapping not supported port id=%d\n", pi);
1988 rte_exit(EXIT_FAILURE,
1989 "set_tx_queue_stats_mapping_registers "
1990 "failed for port id=%d diag=%d\n",
1994 diag = set_rx_queue_stats_mapping_registers(pi, port);
1996 if (diag == -ENOTSUP) {
1997 port->rx_queue_stats_mapping_enabled = 0;
1998 printf("RX queue stats mapping not supported port id=%d\n", pi);
2001 rte_exit(EXIT_FAILURE,
2002 "set_rx_queue_stats_mapping_registers "
2003 "failed for port id=%d diag=%d\n",
2009 rxtx_port_config(struct rte_port *port)
2011 port->rx_conf = port->dev_info.default_rxconf;
2012 port->tx_conf = port->dev_info.default_txconf;
2014 /* Check if any RX/TX parameters have been passed */
2015 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2016 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2018 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2019 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2021 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2022 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2024 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2025 port->rx_conf.rx_free_thresh = rx_free_thresh;
2027 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2028 port->rx_conf.rx_drop_en = rx_drop_en;
2030 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2031 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2033 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2034 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2036 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2037 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2039 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2040 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2042 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2043 port->tx_conf.tx_free_thresh = tx_free_thresh;
2045 if (txq_flags != RTE_PMD_PARAM_UNSET)
2046 port->tx_conf.txq_flags = txq_flags;
2050 init_port_config(void)
2053 struct rte_port *port;
2055 RTE_ETH_FOREACH_DEV(pid) {
2057 port->dev_conf.rxmode = rx_mode;
2058 port->dev_conf.fdir_conf = fdir_conf;
2060 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2061 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2063 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2064 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2067 if (port->dcb_flag == 0) {
2068 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2069 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2071 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2074 rxtx_port_config(port);
2076 rte_eth_macaddr_get(pid, &port->eth_addr);
2078 map_port_queue_stats_mapping_registers(pid, port);
2079 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2080 rte_pmd_ixgbe_bypass_init(pid);
2083 if (lsc_interrupt &&
2084 (rte_eth_devices[pid].data->dev_flags &
2085 RTE_ETH_DEV_INTR_LSC))
2086 port->dev_conf.intr_conf.lsc = 1;
2087 if (rmv_interrupt &&
2088 (rte_eth_devices[pid].data->dev_flags &
2089 RTE_ETH_DEV_INTR_RMV))
2090 port->dev_conf.intr_conf.rmv = 1;
2094 void set_port_slave_flag(portid_t slave_pid)
2096 struct rte_port *port;
2098 port = &ports[slave_pid];
2099 port->slave_flag = 1;
2102 void clear_port_slave_flag(portid_t slave_pid)
2104 struct rte_port *port;
2106 port = &ports[slave_pid];
2107 port->slave_flag = 0;
2110 uint8_t port_is_bonding_slave(portid_t slave_pid)
2112 struct rte_port *port;
2114 port = &ports[slave_pid];
2115 return port->slave_flag;
2118 const uint16_t vlan_tags[] = {
2119 0, 1, 2, 3, 4, 5, 6, 7,
2120 8, 9, 10, 11, 12, 13, 14, 15,
2121 16, 17, 18, 19, 20, 21, 22, 23,
2122 24, 25, 26, 27, 28, 29, 30, 31
2126 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2127 enum dcb_mode_enable dcb_mode,
2128 enum rte_eth_nb_tcs num_tcs,
2134 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2135 * given above, and the number of traffic classes available for use.
2137 if (dcb_mode == DCB_VT_ENABLED) {
2138 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2139 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2140 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2141 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2143 /* VMDQ+DCB RX and TX configurations */
2144 vmdq_rx_conf->enable_default_pool = 0;
2145 vmdq_rx_conf->default_pool = 0;
2146 vmdq_rx_conf->nb_queue_pools =
2147 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2148 vmdq_tx_conf->nb_queue_pools =
2149 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2151 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2152 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2153 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2154 vmdq_rx_conf->pool_map[i].pools =
2155 1 << (i % vmdq_rx_conf->nb_queue_pools);
2157 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2158 vmdq_rx_conf->dcb_tc[i] = i;
2159 vmdq_tx_conf->dcb_tc[i] = i;
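/*
 * Worked example of the mapping above: with num_tcs == ETH_8_TCS there are
 * ETH_16_POOLS queue pools and as many pool maps, so VLAN tag vlan_tags[5]
 * (i.e. 5) is steered to pool 5 (pools = 1 << 5), and each of the 8 user
 * priorities is mapped to the traffic class of the same index.
 */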
2162 /* set DCB mode of RX and TX of multiple queues */
2163 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2164 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2166 struct rte_eth_dcb_rx_conf *rx_conf =
2167 &eth_conf->rx_adv_conf.dcb_rx_conf;
2168 struct rte_eth_dcb_tx_conf *tx_conf =
2169 &eth_conf->tx_adv_conf.dcb_tx_conf;
2171 rx_conf->nb_tcs = num_tcs;
2172 tx_conf->nb_tcs = num_tcs;
2174 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2175 rx_conf->dcb_tc[i] = i % num_tcs;
2176 tx_conf->dcb_tc[i] = i % num_tcs;
2178 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2179 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2180 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2184 eth_conf->dcb_capability_en =
2185 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2187 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2193 init_port_dcb_config(portid_t pid,
2194 enum dcb_mode_enable dcb_mode,
2195 enum rte_eth_nb_tcs num_tcs,
2198 struct rte_eth_conf port_conf;
2199 struct rte_port *rte_port;
2203 rte_port = &ports[pid];
2205 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2206 /* Enter DCB configuration status */
2209 /* Set configuration of DCB in VT mode and DCB in non-VT mode */
2210 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2213 port_conf.rxmode.hw_vlan_filter = 1;
2216 * Write the configuration into the device.
2217 * Set the numbers of RX & TX queues to 0, so
2218 * the RX & TX queues will not be setup.
2220 rte_eth_dev_configure(pid, 0, 0, &port_conf);
2222 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2224 /* If dev_info.vmdq_pool_base is greater than 0,
2225 * the queue ids of the VMDq pools start after the PF queues.
2227 if (dcb_mode == DCB_VT_ENABLED &&
2228 rte_port->dev_info.vmdq_pool_base > 0) {
2229 printf("VMDQ_DCB multi-queue mode is nonsensical"
2230 " for port %d.", pid);
2234 /* Assume the ports in testpmd have the same DCB capability
2235 * and the same number of rxq and txq in DCB mode
2237 if (dcb_mode == DCB_VT_ENABLED) {
2238 if (rte_port->dev_info.max_vfs > 0) {
2239 nb_rxq = rte_port->dev_info.nb_rx_queues;
2240 nb_txq = rte_port->dev_info.nb_tx_queues;
2242 nb_rxq = rte_port->dev_info.max_rx_queues;
2243 nb_txq = rte_port->dev_info.max_tx_queues;
2246 /* If VT is disabled, use all PF queues */
2247 if (rte_port->dev_info.vmdq_pool_base == 0) {
2248 nb_rxq = rte_port->dev_info.max_rx_queues;
2249 nb_txq = rte_port->dev_info.max_tx_queues;
2251 nb_rxq = (queueid_t)num_tcs;
2252 nb_txq = (queueid_t)num_tcs;
2256 rx_free_thresh = 64;
2258 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2260 rxtx_port_config(rte_port);
2262 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2263 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2264 rx_vft_set(pid, vlan_tags[i], 1);
2266 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2267 map_port_queue_stats_mapping_registers(pid, rte_port);
2269 rte_port->dcb_flag = 1;
2277 /* Configuration of Ethernet ports. */
2278 ports = rte_zmalloc("testpmd: ports",
2279 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2280 RTE_CACHE_LINE_SIZE);
2281 if (ports == NULL) {
2282 rte_exit(EXIT_FAILURE,
2283 "rte_zmalloc(%d struct rte_port) failed\n",
2299 const char clr[] = { 27, '[', '2', 'J', '\0' };
2300 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2302 /* Clear screen and move to top left */
2303 printf("%s%s", clr, top_left);
2305 printf("\nPort statistics ====================================");
2306 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2307 nic_stats_display(fwd_ports_ids[i]);
2311 signal_handler(int signum)
2313 if (signum == SIGINT || signum == SIGTERM) {
2314 printf("\nSignal %d received, preparing to exit...\n",
2316 #ifdef RTE_LIBRTE_PDUMP
2317 /* uninitialize packet capture framework */
2320 #ifdef RTE_LIBRTE_LATENCY_STATS
2321 rte_latencystats_uninit();
2324 /* exit with the expected status */
2325 signal(signum, SIG_DFL);
2326 kill(getpid(), signum);
2331 main(int argc, char** argv)
2336 signal(SIGINT, signal_handler);
2337 signal(SIGTERM, signal_handler);
2339 diag = rte_eal_init(argc, argv);
2341 rte_panic("Cannot init EAL\n");
2343 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2344 RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2348 #ifdef RTE_LIBRTE_PDUMP
2349 /* initialize packet capture framework */
2350 rte_pdump_init(NULL);
2353 nb_ports = (portid_t) rte_eth_dev_count();
2355 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2357 /* allocate port structures, and init them */
2360 set_def_fwd_config();
2362 rte_panic("Empty set of forwarding logical cores - check the "
2363 "core mask supplied in the command parameters\n");
2365 /* Bitrate/latency stats disabled by default */
2366 #ifdef RTE_LIBRTE_BITRATE
2367 bitrate_enabled = 0;
2369 #ifdef RTE_LIBRTE_LATENCY_STATS
2370 latencystats_enabled = 0;
2376 launch_args_parse(argc, argv);
2378 if (tx_first && interactive)
2379 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2380 "interactive mode.\n");
2382 if (tx_first && lsc_interrupt) {
2383 printf("Warning: lsc_interrupt needs to be off when "
2384 " using tx_first. Disabling.\n");
2388 if (!nb_rxq && !nb_txq)
2389 printf("Warning: Either rx or tx queues should be non-zero\n");
2391 if (nb_rxq > 1 && nb_rxq > nb_txq)
2392 printf("Warning: nb_rxq=%d enables RSS configuration, "
2393 "but nb_txq=%d will prevent to fully test it.\n",
2397 if (start_port(RTE_PORT_ALL) != 0)
2398 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2400 /* set all ports to promiscuous mode by default */
2401 RTE_ETH_FOREACH_DEV(port_id)
2402 rte_eth_promiscuous_enable(port_id);
2404 /* Init metrics library */
2405 rte_metrics_init(rte_socket_id());
2407 #ifdef RTE_LIBRTE_LATENCY_STATS
2408 if (latencystats_enabled != 0) {
2409 int ret = rte_latencystats_init(1, NULL);
2411 printf("Warning: latencystats init()"
2412 " returned error %d\n", ret);
2413 printf("Latencystats running on lcore %d\n",
2414 latencystats_lcore_id);
2418 /* Setup bitrate stats */
2419 #ifdef RTE_LIBRTE_BITRATE
2420 if (bitrate_enabled != 0) {
2421 bitrate_data = rte_stats_bitrate_create();
2422 if (bitrate_data == NULL)
2423 rte_exit(EXIT_FAILURE,
2424 "Could not allocate bitrate data.\n");
2425 rte_stats_bitrate_reg(bitrate_data);
2429 #ifdef RTE_LIBRTE_CMDLINE
2430 if (strlen(cmdline_filename) != 0)
2431 cmdline_read_from_file(cmdline_filename);
2433 if (interactive == 1) {
2435 printf("Start automatic packet forwarding\n");
2436 start_packet_forwarding(0);
2446 printf("No command-line core given, starting packet forwarding\n");
2447 start_packet_forwarding(tx_first);
2448 if (stats_period != 0) {
2449 uint64_t prev_time = 0, cur_time, diff_time = 0;
2450 uint64_t timer_period;
2452 /* Convert to number of cycles */
2453 timer_period = stats_period * rte_get_timer_hz();
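/*
 * Example (assuming a 2.0 GHz timer): stats_period = 5 gives
 * timer_period = 5 * 2e9 = 1e10 cycles between statistics refreshes.
 */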
2456 cur_time = rte_get_timer_cycles();
2457 diff_time += cur_time - prev_time;
2459 if (diff_time >= timer_period) {
2461 /* Reset the timer */
2464 /* Sleep to avoid unnecessary checks */
2465 prev_time = cur_time;
2470 printf("Press enter to exit\n");
2471 rc = read(0, &c, 1);