4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_launch.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"
93 uint16_t verbose_level = 0; /**< Silent by default. */
94 int testpmd_logtype; /**< Log type for testpmd logs */
96 /* use master core for command line ? */
97 uint8_t interactive = 0;
98 uint8_t auto_start = 0;
100 char cmdline_filename[PATH_MAX] = {0};
103 * NUMA support configuration.
104 * When set, the NUMA support attempts to dispatch the allocation of the
105 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
106 * probed ports among the CPU sockets 0 and 1.
107 * Otherwise, all memory is allocated from CPU socket 0.
109 uint8_t numa_support = 1; /**< numa enabled by default */
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not set.
115 uint8_t socket_num = UMA_NO_CONFIG;
 * Use ANONYMOUS mapped memory (which might not be physically contiguous) for
 * mbufs.
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
128 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
129 portid_t nb_peer_eth_addrs = 0;
132 * Probed Target Environment.
134 struct rte_port *ports; /**< For all probed ethernet ports. */
135 portid_t nb_ports; /**< Number of probed ethernet ports. */
136 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
137 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
140 * Test Forwarding Configuration.
141 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
142 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
144 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
145 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
146 portid_t nb_cfg_ports; /**< Number of configured ports. */
147 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
149 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
150 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
152 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams; /**< Equals (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
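/*
 * Example (illustrative numbers): with nb_ports == 2, nb_rxq == 4 and
 * nb_txq == 2, init_fwd_streams() allocates 2 * RTE_MAX(4, 2) == 8 streams.
 */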
156 * Forwarding engines.
158 struct fwd_engine * fwd_engines[] = {
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
177 struct fwd_config cur_fwd_config;
178 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
179 uint32_t retry_enabled;
180 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
181 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
183 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
184 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
185 * specified on command-line. */
186 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
/*
 * In a container, a process running with the 'stats-period' option cannot be
 * terminated from the outside. Set a flag to exit the stats-period loop when
 * SIGINT/SIGTERM is received.
 */
uint8_t f_quit;
195 * Configuration of packet segments used by the "txonly" processing engine.
197 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
201 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
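/*
 * The segment layout can typically be changed at run time from the testpmd
 * prompt, e.g. (illustrative): "set txpkts 64,64" to send two-segment
 * packets whose total length is 128 bytes.
 */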
203 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
204 /**< Split policy for packets to TX. */
206 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
207 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Current configuration is in DCB or not; 0 means it is not in DCB mode. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;
216 * Configurable number of RX/TX queues.
218 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
219 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
222 * Configurable number of RX/TX ring descriptors.
224 #define RTE_TEST_RX_DESC_DEFAULT 128
225 #define RTE_TEST_TX_DESC_DEFAULT 512
226 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
227 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
229 #define RTE_PMD_PARAM_UNSET -1
231 * Configurable values of RX and TX ring threshold registers.
234 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
235 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
236 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
238 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
239 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
240 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
243 * Configurable value of RX free threshold.
245 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
248 * Configurable value of RX drop enable.
250 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
253 * Configurable value of TX free threshold.
255 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
258 * Configurable value of TX RS bit threshold.
260 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
263 * Configurable value of TX queue flags.
265 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
268 * Receive Side Scaling (RSS) configuration.
270 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
273 * Port topology configuration
275 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
 * Avoid flushing all the RX streams before starting forwarding.
280 uint8_t no_flush_rx = 0; /* flush by default */
283 * Flow API isolated mode.
285 uint8_t flow_isolate_all;
 * Avoid checking link status when starting/stopping a port.
290 uint8_t no_link_check = 0; /* check by default */
293 * Enable link status change notification
295 uint8_t lsc_interrupt = 1; /* enabled by default */
298 * Enable device removal notification.
300 uint8_t rmv_interrupt = 1; /* enabled by default */
303 * Display or mask ether events
304 * Default to all events except VF_MBOX
306 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
307 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
308 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
309 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
310 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
311 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
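/*
 * For example, to also display VF mailbox events, OR in
 * (UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX).
 */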
314 * NIC bypass mode configuration options.
317 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
318 /* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif
323 #ifdef RTE_LIBRTE_LATENCY_STATS
 * Set when latency stats are enabled on the command line.
328 uint8_t latencystats_enabled;
 * Lcore ID to serve latency statistics.
lcoreid_t latencystats_lcore_id = -1;
#endif
338 * Ethernet device configuration.
340 struct rte_eth_rxmode rx_mode = {
341 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
343 .header_split = 0, /**< Header Split disabled. */
344 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
345 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
346 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
347 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
348 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
349 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
	.hw_timestamp = 0, /**< HW timestamp disabled. */
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
376 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
378 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
379 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
381 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
382 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
384 uint16_t nb_tx_queue_stats_mappings = 0;
385 uint16_t nb_rx_queue_stats_mappings = 0;
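/*
 * The mapping arrays above are normally filled from the command line; the
 * usual option syntax is (illustrative):
 *   --tx-queue-stats-mapping=(port,queue,mapping)[,(port,queue,mapping)]
 * and the matching --rx-queue-stats-mapping option.
 */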
 * Display zero values by default for xstats (i.e. xstats_hide_zero == 0).
390 uint8_t xstats_hide_zero;
392 unsigned int num_sockets = 0;
393 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
395 #ifdef RTE_LIBRTE_BITRATE
396 /* Bitrate statistics */
397 struct rte_stats_bitrates *bitrate_data;
398 lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif
402 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
403 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
405 /* Forward function declarations */
406 static void map_port_queue_stats_mapping_registers(portid_t pi,
407 struct rte_port *port);
408 static void check_all_ports_link_status(uint32_t port_mask);
409 static int eth_event_callback(portid_t port_id,
410 enum rte_eth_event_type type,
411 void *param, void *ret_param);
414 * Check if all the ports are started.
415 * If yes, return positive value. If not, return zero.
417 static int all_ports_started(void);
419 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
420 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
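/* i.e. 1514 bytes: the 1518-byte maximum Ethernet frame minus the 4-byte CRC. */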
/*
 * Helper function to check if a socket is already discovered.
 * If yes, return zero. If not, return a positive value.
 */
static int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
439 * Setup default configuration.
442 set_default_fwd_lcores_config(void)
446 unsigned int sock_num;
449 for (i = 0; i < RTE_MAX_LCORE; i++) {
450 sock_num = rte_lcore_to_socket_id(i);
451 if (new_socket_id(sock_num)) {
452 if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
465 nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
471 set_def_peer_eth_addrs(void)
475 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
476 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
477 peer_eth_addrs[i].addr_bytes[5] = i;
482 set_default_fwd_ports_config(void)
487 RTE_ETH_FOREACH_DEV(pt_id)
488 fwd_ports_ids[i++] = pt_id;
490 nb_cfg_ports = nb_ports;
491 nb_fwd_ports = nb_ports;
495 set_def_fwd_config(void)
497 set_default_fwd_lcores_config();
498 set_def_peer_eth_addrs();
499 set_default_fwd_ports_config();
503 * Configuration initialisation done once at init time.
506 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
507 unsigned int socket_id)
509 char pool_name[RTE_MEMPOOL_NAMESIZE];
510 struct rte_mempool *rte_mp = NULL;
513 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
514 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
517 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
518 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
521 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
522 mb_size, (unsigned) mb_mempool_cache,
523 sizeof(struct rte_pktmbuf_pool_private),
528 if (rte_mempool_populate_anon(rte_mp) == 0) {
529 rte_mempool_free(rte_mp);
533 rte_pktmbuf_pool_init(rte_mp, NULL);
534 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
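		/*
		 * The two init calls above replicate what
		 * rte_pktmbuf_pool_create() does internally; the
		 * anonymous-memory path has to build the pool up manually.
		 */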
536 /* wrapper to rte_mempool_create() */
537 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
538 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
542 if (rte_mp == NULL) {
543 rte_exit(EXIT_FAILURE,
544 "Creation of mbuf pool for socket %u failed: %s\n",
545 socket_id, rte_strerror(rte_errno));
546 } else if (verbose_level > 0) {
547 rte_mempool_dump(stdout, rte_mp);
 * Check whether the given socket id is valid in NUMA mode.
 * If valid, return 0; otherwise return -1.
556 check_socket_id(const unsigned int socket_id)
558 static int warning_once = 0;
560 if (new_socket_id(socket_id)) {
561 if (!warning_once && numa_support)
562 printf("Warning: NUMA should be configured manually by"
563 " using --port-numa-config and"
564 " --ring-numa-config parameters along with"
576 struct rte_port *port;
577 struct rte_mempool *mbp;
578 unsigned int nb_mbuf_per_pool;
580 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
581 struct rte_gro_param gro_param;
	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
587 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
588 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
589 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
592 /* Configuration of logical cores. */
593 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
594 sizeof(struct fwd_lcore *) * nb_lcores,
595 RTE_CACHE_LINE_SIZE);
596 if (fwd_lcores == NULL) {
597 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
598 "failed\n", nb_lcores);
600 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
601 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
602 sizeof(struct fwd_lcore),
603 RTE_CACHE_LINE_SIZE);
604 if (fwd_lcores[lc_id] == NULL) {
605 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
608 fwd_lcores[lc_id]->cpuid_idx = lc_id;
611 RTE_ETH_FOREACH_DEV(pid) {
613 rte_eth_dev_info_get(pid, &port->dev_info);
616 if (port_numa[pid] != NUMA_NO_CONFIG)
617 port_per_socket[port_numa[pid]]++;
619 uint32_t socket_id = rte_eth_dev_socket_id(pid);
621 /* if socket_id is invalid, set to 0 */
			if (check_socket_id(socket_id) < 0)
				socket_id = 0;
			port_per_socket[socket_id]++;
628 /* set flag to initialize port/queue */
629 port->need_reconfig = 1;
630 port->need_reconfig_queues = 1;
	 * Create mbuf pools.
	 * If NUMA support is disabled, create a single mbuf pool in
	 * socket 0 memory by default.
	 * Otherwise, create an mbuf pool in the memory of each detected socket.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
	 * nb_txd can be re-configured at run time.
642 if (param_total_num_mbufs)
643 nb_mbuf_per_pool = param_total_num_mbufs;
645 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
646 (nb_lcores * mb_mempool_cache) +
647 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
648 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
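	/*
	 * Worked example, assuming the usual testpmd.h defaults (values are
	 * assumptions; check the header of your DPDK version):
	 * 2048 (RTE_TEST_RX_DESC_MAX) + 1 * 250 (DEF_MBUF_CACHE) +
	 * 2048 (RTE_TEST_TX_DESC_MAX) + 512 (MAX_PKT_BURST) = 4858 mbufs,
	 * multiplied by RTE_MAX_ETHPORTS, for a single forwarding lcore.
	 */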
		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}
667 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
668 DEV_TX_OFFLOAD_GRE_TNL_TSO;
	 * Record which mbuf pool each logical core should use, if needed.
672 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
678 fwd_lcores[lc_id]->mbp = mbp;
679 /* initialize GSO context */
680 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
681 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
682 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
685 fwd_lcores[lc_id]->gso_ctx.flag = 0;
688 /* Configuration of packet forwarding streams. */
689 if (init_fwd_streams() < 0)
690 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
694 /* create a gro context for each lcore */
695 gro_param.gro_types = RTE_GRO_TCP_IPV4;
696 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
697 gro_param.max_item_per_flow = MAX_PKT_BURST;
698 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
699 gro_param.socket_id = rte_lcore_to_socket_id(
700 fwd_lcores_cpuids[lc_id]);
701 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
702 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
703 rte_exit(EXIT_FAILURE,
704 "rte_gro_ctx_create() failed\n");
711 reconfig(portid_t new_port_id, unsigned socket_id)
713 struct rte_port *port;
715 /* Reconfiguration of Ethernet ports. */
716 port = &ports[new_port_id];
717 rte_eth_dev_info_get(new_port_id, &port->dev_info);
719 /* set flag to initialize port/queue */
720 port->need_reconfig = 1;
721 port->need_reconfig_queues = 1;
722 port->socket_id = socket_id;
729 init_fwd_streams(void)
732 struct rte_port *port;
733 streamid_t sm_id, nb_fwd_streams_new;
736 /* set socket id according to numa or not */
737 RTE_ETH_FOREACH_DEV(pid) {
739 if (nb_rxq > port->dev_info.max_rx_queues) {
740 printf("Fail: nb_rxq(%d) is greater than "
741 "max_rx_queues(%d)\n", nb_rxq,
742 port->dev_info.max_rx_queues);
745 if (nb_txq > port->dev_info.max_tx_queues) {
746 printf("Fail: nb_txq(%d) is greater than "
747 "max_tx_queues(%d)\n", nb_txq,
748 port->dev_info.max_tx_queues);
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
779 if (fwd_streams != NULL) {
780 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
781 if (fwd_streams[sm_id] == NULL)
783 rte_free(fwd_streams[sm_id]);
784 fwd_streams[sm_id] = NULL;
786 rte_free(fwd_streams);
791 nb_fwd_streams = nb_fwd_streams_new;
792 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
793 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
794 if (fwd_streams == NULL)
795 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
796 "failed\n", nb_fwd_streams);
798 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
799 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
800 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
801 if (fwd_streams[sm_id] == NULL)
802 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
809 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
811 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
813 unsigned int total_burst;
814 unsigned int nb_burst;
815 unsigned int burst_stats[3];
816 uint16_t pktnb_stats[3];
818 int burst_percent[3];
821 * First compute the total number of packet bursts and the
822 * two highest numbers of bursts of the same number of packets.
825 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
826 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
827 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
828 nb_burst = pbs->pkt_burst_spread[nb_pkt];
831 total_burst += nb_burst;
832 if (nb_burst > burst_stats[0]) {
833 burst_stats[1] = burst_stats[0];
834 pktnb_stats[1] = pktnb_stats[0];
835 burst_stats[0] = nb_burst;
836 pktnb_stats[0] = nb_pkt;
	if (total_burst == 0)
		return;
841 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
842 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
843 burst_percent[0], (int) pktnb_stats[0]);
844 if (burst_stats[0] == total_burst) {
848 if (burst_stats[0] + burst_stats[1] == total_burst) {
849 printf(" + %d%% of %d pkts]\n",
850 100 - burst_percent[0], pktnb_stats[1]);
853 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
854 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
855 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
856 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
859 printf(" + %d%% of %d pkts + %d%% of others]\n",
860 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
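	/*
	 * Illustrative output (hypothetical numbers):
	 *   RX-bursts : 1000000 [75% of 32 pkts + 20% of 16 pkts + 5% of others]
	 */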
862 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
865 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
867 struct rte_port *port;
870 static const char *fwd_stats_border = "----------------------";
872 port = &ports[port_id];
873 printf("\n %s Forward statistics for port %-2d %s\n",
874 fwd_stats_border, port_id, fwd_stats_border);
876 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
877 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
879 stats->ipackets, stats->imissed,
880 (uint64_t) (stats->ipackets + stats->imissed));
882 if (cur_fwd_eng == &csum_fwd_engine)
883 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
884 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
885 if ((stats->ierrors + stats->rx_nombuf) > 0) {
886 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
887 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
890 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
892 stats->opackets, port->tx_dropped,
893 (uint64_t) (stats->opackets + port->tx_dropped));
896 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
898 stats->ipackets, stats->imissed,
899 (uint64_t) (stats->ipackets + stats->imissed));
901 if (cur_fwd_eng == &csum_fwd_engine)
902 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
903 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
904 if ((stats->ierrors + stats->rx_nombuf) > 0) {
905 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
906 printf(" RX-nombufs: %14"PRIu64"\n",
910 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
912 stats->opackets, port->tx_dropped,
913 (uint64_t) (stats->opackets + port->tx_dropped));
916 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
918 pkt_burst_stats_display("RX",
919 &port->rx_stream->rx_burst_stats);
921 pkt_burst_stats_display("TX",
922 &port->tx_stream->tx_burst_stats);
925 if (port->rx_queue_stats_mapping_enabled) {
927 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
928 printf(" Stats reg %2d RX-packets:%14"PRIu64
929 " RX-errors:%14"PRIu64
930 " RX-bytes:%14"PRIu64"\n",
931 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
935 if (port->tx_queue_stats_mapping_enabled) {
936 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
937 printf(" Stats reg %2d TX-packets:%14"PRIu64
938 " TX-bytes:%14"PRIu64"\n",
939 i, stats->q_opackets[i], stats->q_obytes[i]);
943 printf(" %s--------------------------------%s\n",
944 fwd_stats_border, fwd_stats_border);
948 fwd_stream_stats_display(streamid_t stream_id)
950 struct fwd_stream *fs;
951 static const char *fwd_top_stats_border = "-------";
953 fs = fwd_streams[stream_id];
954 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
955 (fs->fwd_dropped == 0))
957 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
958 "TX Port=%2d/Queue=%2d %s\n",
959 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
960 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
961 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
962 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
964 /* if checksum mode */
965 if (cur_fwd_eng == &csum_fwd_engine) {
966 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
967 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
970 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
971 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
972 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
977 flush_fwd_rx_queues(void)
979 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
986 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
987 uint64_t timer_period;
989 /* convert to number of cycles */
990 timer_period = rte_get_timer_hz(); /* 1 second timeout */
992 for (j = 0; j < 2; j++) {
993 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
994 for (rxq = 0; rxq < nb_rxq; rxq++) {
995 port_id = fwd_ports_ids[rxp];
				 * testpmd can get stuck in the do/while loop
				 * below if rte_eth_rx_burst() keeps returning
				 * packets, so a timer is used to exit the
				 * loop after the 1-second timeout expires.
1002 prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
1006 for (i = 0; i < nb_rx; i++)
1007 rte_pktmbuf_free(pkts_burst[i]);
1009 cur_tsc = rte_rdtsc();
1010 diff_tsc = cur_tsc - prev_tsc;
1011 timer_tsc += diff_tsc;
1012 } while ((nb_rx > 0) &&
1013 (timer_tsc < timer_period));
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1022 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1024 struct fwd_stream **fsm;
1027 #ifdef RTE_LIBRTE_BITRATE
1028 uint64_t tics_per_1sec;
1029 uint64_t tics_datum;
1030 uint64_t tics_current;
1031 uint8_t idx_port, cnt_ports;
1033 cnt_ports = rte_eth_dev_count();
1034 tics_datum = rte_rdtsc();
1035 tics_per_1sec = rte_get_timer_hz();
1037 fsm = &fwd_streams[fc->stream_idx];
1038 nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
1042 #ifdef RTE_LIBRTE_BITRATE
1043 if (bitrate_enabled != 0 &&
1044 bitrate_lcore_id == rte_lcore_id()) {
1045 tics_current = rte_rdtsc();
1046 if (tics_current - tics_datum >= tics_per_1sec) {
1047 /* Periodic bitrate calculation */
				for (idx_port = 0;
				     idx_port < cnt_ports;
				     idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
							       idx_port);
				tics_datum = tics_current;
1057 #ifdef RTE_LIBRTE_LATENCY_STATS
1058 if (latencystats_enabled != 0 &&
1059 latencystats_lcore_id == rte_lcore_id())
1060 rte_latencystats_update();
1063 } while (! fc->stopped);
1067 start_pkt_forward_on_core(void *fwd_arg)
1069 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1070 cur_fwd_config.fwd_eng->packet_fwd);
1075 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1076 * Used to start communication flows in network loopback test configurations.
1079 run_one_txonly_burst_on_core(void *fwd_arg)
1081 struct fwd_lcore *fwd_lc;
1082 struct fwd_lcore tmp_lcore;
1084 fwd_lc = (struct fwd_lcore *) fwd_arg;
1085 tmp_lcore = *fwd_lc;
1086 tmp_lcore.stopped = 1;
1087 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1092 * Launch packet forwarding:
1093 * - Setup per-port forwarding context.
 * - Launch logical cores with their forwarding configuration.
1097 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1099 port_fwd_begin_t port_fwd_begin;
1104 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1105 if (port_fwd_begin != NULL) {
1106 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1107 (*port_fwd_begin)(fwd_ports_ids[i]);
1109 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1110 lc_id = fwd_lcores_cpuids[i];
1111 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1112 fwd_lcores[i]->stopped = 0;
1113 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1114 fwd_lcores[i], lc_id);
1116 printf("launch lcore %u failed - diag=%d\n",
1123 * Launch packet forwarding configuration.
1126 start_packet_forwarding(int with_tx_first)
1128 port_fwd_begin_t port_fwd_begin;
1129 port_fwd_end_t port_fwd_end;
1130 struct rte_port *port;
1135 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1136 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1138 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1139 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1141 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1142 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1143 (!nb_rxq || !nb_txq))
1144 rte_exit(EXIT_FAILURE,
1145 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1146 cur_fwd_eng->fwd_mode_name);
1148 if (all_ports_started() == 0) {
1149 printf("Not all ports were started\n");
1152 if (test_done == 0) {
1153 printf("Packet forwarding already started\n");
1157 if (init_fwd_streams() < 0) {
1158 printf("Fail from init_fwd_streams()\n");
1163 for (i = 0; i < nb_fwd_ports; i++) {
1164 pt_id = fwd_ports_ids[i];
1165 port = &ports[pt_id];
1166 if (!port->dcb_flag) {
1167 printf("In DCB mode, all forwarding ports must "
1168 "be configured in this mode.\n");
1172 if (nb_fwd_lcores == 1) {
1173 printf("In DCB mode,the nb forwarding cores "
1174 "should be larger than 1.\n");
1181 flush_fwd_rx_queues();
1184 pkt_fwd_config_display(&cur_fwd_config);
1185 rxtx_config_display();
1187 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1188 pt_id = fwd_ports_ids[i];
1189 port = &ports[pt_id];
1190 rte_eth_stats_get(pt_id, &port->stats);
1191 port->tx_dropped = 0;
1193 map_port_queue_stats_mapping_registers(pt_id, port);
1195 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1196 fwd_streams[sm_id]->rx_packets = 0;
1197 fwd_streams[sm_id]->tx_packets = 0;
1198 fwd_streams[sm_id]->fwd_dropped = 0;
1199 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1200 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1202 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1203 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1204 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1205 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1206 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1208 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1209 fwd_streams[sm_id]->core_cycles = 0;
1212 if (with_tx_first) {
1213 port_fwd_begin = tx_only_engine.port_fwd_begin;
1214 if (port_fwd_begin != NULL) {
1215 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1216 (*port_fwd_begin)(fwd_ports_ids[i]);
1218 while (with_tx_first--) {
1219 launch_packet_forwarding(
1220 run_one_txonly_burst_on_core);
1221 rte_eal_mp_wait_lcore();
1223 port_fwd_end = tx_only_engine.port_fwd_end;
1224 if (port_fwd_end != NULL) {
1225 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1226 (*port_fwd_end)(fwd_ports_ids[i]);
1229 launch_packet_forwarding(start_pkt_forward_on_core);
1233 stop_packet_forwarding(void)
1235 struct rte_eth_stats stats;
1236 struct rte_port *port;
1237 port_fwd_end_t port_fwd_end;
1242 uint64_t total_recv;
1243 uint64_t total_xmit;
1244 uint64_t total_rx_dropped;
1245 uint64_t total_tx_dropped;
1246 uint64_t total_rx_nombuf;
1247 uint64_t tx_dropped;
1248 uint64_t rx_bad_ip_csum;
1249 uint64_t rx_bad_l4_csum;
1250 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1251 uint64_t fwd_cycles;
1254 static const char *acc_stats_border = "+++++++++++++++";
1257 printf("Packet forwarding not started\n");
1260 printf("Telling cores to stop...");
1261 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1262 fwd_lcores[lc_id]->stopped = 1;
1263 printf("\nWaiting for lcores to finish...\n");
1264 rte_eal_mp_wait_lcore();
1265 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1266 if (port_fwd_end != NULL) {
1267 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1268 pt_id = fwd_ports_ids[i];
1269 (*port_fwd_end)(pt_id);
1272 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1275 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1276 if (cur_fwd_config.nb_fwd_streams >
1277 cur_fwd_config.nb_fwd_ports) {
1278 fwd_stream_stats_display(sm_id);
1279 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1280 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1282 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1284 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1287 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1288 tx_dropped = (uint64_t) (tx_dropped +
1289 fwd_streams[sm_id]->fwd_dropped);
1290 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1293 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1294 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1295 fwd_streams[sm_id]->rx_bad_ip_csum);
1296 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1300 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1301 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1302 fwd_streams[sm_id]->rx_bad_l4_csum);
1303 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1306 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1307 fwd_cycles = (uint64_t) (fwd_cycles +
1308 fwd_streams[sm_id]->core_cycles);
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
1314 total_tx_dropped = 0;
1315 total_rx_nombuf = 0;
1316 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1317 pt_id = fwd_ports_ids[i];
1319 port = &ports[pt_id];
1320 rte_eth_stats_get(pt_id, &stats);
1321 stats.ipackets -= port->stats.ipackets;
1322 port->stats.ipackets = 0;
1323 stats.opackets -= port->stats.opackets;
1324 port->stats.opackets = 0;
1325 stats.ibytes -= port->stats.ibytes;
1326 port->stats.ibytes = 0;
1327 stats.obytes -= port->stats.obytes;
1328 port->stats.obytes = 0;
1329 stats.imissed -= port->stats.imissed;
1330 port->stats.imissed = 0;
1331 stats.oerrors -= port->stats.oerrors;
1332 port->stats.oerrors = 0;
1333 stats.rx_nombuf -= port->stats.rx_nombuf;
1334 port->stats.rx_nombuf = 0;
1336 total_recv += stats.ipackets;
1337 total_xmit += stats.opackets;
1338 total_rx_dropped += stats.imissed;
1339 total_tx_dropped += port->tx_dropped;
1340 total_rx_nombuf += stats.rx_nombuf;
1342 fwd_port_stats_display(pt_id, &stats);
1345 printf("\n %s Accumulated forward statistics for all ports"
1347 acc_stats_border, acc_stats_border);
1348 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1350 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1352 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1353 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1354 if (total_rx_nombuf > 0)
1355 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1356 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1358 acc_stats_border, acc_stats_border);
1359 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1361 printf("\n CPU cycles/packet=%u (total cycles="
1362 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1363 (unsigned int)(fwd_cycles / total_recv),
1364 fwd_cycles, total_recv);
1366 printf("\nDone.\n");
1371 dev_set_link_up(portid_t pid)
1373 if (rte_eth_dev_set_link_up(pid) < 0)
1374 printf("\nSet link up fail.\n");
1378 dev_set_link_down(portid_t pid)
1380 if (rte_eth_dev_set_link_down(pid) < 0)
1381 printf("\nSet link down fail.\n");
1385 all_ports_started(void)
1388 struct rte_port *port;
1390 RTE_ETH_FOREACH_DEV(pi) {
1392 /* Check if there is a port which is not started */
1393 if ((port->port_status != RTE_PORT_STARTED) &&
1394 (port->slave_flag == 0))
	/* all ports are started */
1403 all_ports_stopped(void)
1406 struct rte_port *port;
1408 RTE_ETH_FOREACH_DEV(pi) {
1410 if ((port->port_status != RTE_PORT_STOPPED) &&
1411 (port->slave_flag == 0))
1419 port_is_started(portid_t port_id)
1421 if (port_id_is_invalid(port_id, ENABLED_WARN))
1424 if (ports[port_id].port_status != RTE_PORT_STARTED)
1431 port_is_closed(portid_t port_id)
1433 if (port_id_is_invalid(port_id, ENABLED_WARN))
1436 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1443 start_port(portid_t pid)
1445 int diag, need_check_link_status = -1;
1448 struct rte_port *port;
1449 struct ether_addr mac_addr;
1450 enum rte_eth_event_type event_type;
1452 if (port_id_is_invalid(pid, ENABLED_WARN))
1457 RTE_ETH_FOREACH_DEV(pi) {
1458 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1461 need_check_link_status = 0;
1463 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1464 RTE_PORT_HANDLING) == 0) {
1465 printf("Port %d is now not stopped\n", pi);
1469 if (port->need_reconfig > 0) {
1470 port->need_reconfig = 0;
1472 if (flow_isolate_all) {
1473 int ret = port_flow_isolate(pi, 1);
1475 printf("Failed to apply isolated"
1476 " mode on port %d\n", pi);
1481 printf("Configuring Port %d (socket %u)\n", pi,
1483 /* configure port */
1484 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1487 if (rte_atomic16_cmpset(&(port->port_status),
1488 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1489 printf("Port %d can not be set back "
1490 "to stopped\n", pi);
1491 printf("Fail to configure port %d\n", pi);
1492 /* try to reconfigure port next time */
1493 port->need_reconfig = 1;
1497 if (port->need_reconfig_queues > 0) {
1498 port->need_reconfig_queues = 0;
1499 /* setup tx queues */
1500 for (qi = 0; qi < nb_txq; qi++) {
1501 if ((numa_support) &&
1502 (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));
1514 /* Fail to setup tx queue, return */
1515 if (rte_atomic16_cmpset(&(port->port_status),
1517 RTE_PORT_STOPPED) == 0)
1518 printf("Port %d can not be set back "
1519 "to stopped\n", pi);
1520 printf("Fail to configure port %d tx queues\n", pi);
1521 /* try to reconfigure queues next time */
1522 port->need_reconfig_queues = 1;
1525 /* setup rx queues */
1526 for (qi = 0; qi < nb_rxq; qi++) {
1527 if ((numa_support) &&
1528 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1529 struct rte_mempool * mp =
1530 mbuf_pool_find(rxring_numa[pi]);
1532 printf("Failed to setup RX queue:"
1533 "No mempool allocation"
1534 " on the socket %d\n",
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
1543 struct rte_mempool *mp =
1544 mbuf_pool_find(port->socket_id);
1546 printf("Failed to setup RX queue:"
1547 "No mempool allocation"
1548 " on the socket %d\n",
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
1559 /* Fail to setup rx queue, return */
1560 if (rte_atomic16_cmpset(&(port->port_status),
1562 RTE_PORT_STOPPED) == 0)
1563 printf("Port %d can not be set back "
1564 "to stopped\n", pi);
1565 printf("Fail to configure port %d rx queues\n", pi);
1566 /* try to reconfigure queues next time */
1567 port->need_reconfig_queues = 1;
1572 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1573 event_type < RTE_ETH_EVENT_MAX;
			diag = rte_eth_dev_callback_register(pi,
					event_type,
					eth_event_callback,
					NULL);
			if (diag) {
				printf("Failed to setup event callback for event %d\n",
					event_type);
				return -1;
			}
1587 if (rte_eth_dev_start(pi) < 0) {
1588 printf("Fail to start port %d\n", pi);
1590 /* Fail to setup rx queue, return */
1591 if (rte_atomic16_cmpset(&(port->port_status),
1592 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1593 printf("Port %d can not be set back to "
1598 if (rte_atomic16_cmpset(&(port->port_status),
1599 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1600 printf("Port %d can not be set into started\n", pi);
1602 rte_eth_macaddr_get(pi, &mac_addr);
1603 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1604 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1605 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1606 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
		/* at least one port started, need to check link status */
1609 need_check_link_status = 1;
1612 if (need_check_link_status == 1 && !no_link_check)
1613 check_all_ports_link_status(RTE_PORT_ALL);
1614 else if (need_check_link_status == 0)
1615 printf("Please stop the ports first\n");
1622 stop_port(portid_t pid)
1625 struct rte_port *port;
1626 int need_check_link_status = 0;
1633 if (port_id_is_invalid(pid, ENABLED_WARN))
1636 printf("Stopping ports...\n");
1638 RTE_ETH_FOREACH_DEV(pi) {
1639 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1642 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1643 printf("Please remove port %d from forwarding configuration.\n", pi);
1647 if (port_is_bonding_slave(pi)) {
1648 printf("Please remove port %d from bonded device.\n", pi);
1653 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1654 RTE_PORT_HANDLING) == 0)
1657 rte_eth_dev_stop(pi);
1659 if (rte_atomic16_cmpset(&(port->port_status),
1660 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1661 printf("Port %d can not be set into stopped\n", pi);
1662 need_check_link_status = 1;
1664 if (need_check_link_status && !no_link_check)
1665 check_all_ports_link_status(RTE_PORT_ALL);
1671 close_port(portid_t pid)
1674 struct rte_port *port;
1676 if (port_id_is_invalid(pid, ENABLED_WARN))
1679 printf("Closing ports...\n");
1681 RTE_ETH_FOREACH_DEV(pi) {
1682 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1685 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1686 printf("Please remove port %d from forwarding configuration.\n", pi);
1690 if (port_is_bonding_slave(pi)) {
1691 printf("Please remove port %d from bonded device.\n", pi);
1696 if (rte_atomic16_cmpset(&(port->port_status),
1697 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1698 printf("Port %d is already closed\n", pi);
1702 if (rte_atomic16_cmpset(&(port->port_status),
1703 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1704 printf("Port %d is now not stopped\n", pi);
1708 if (port->flow_list)
1709 port_flow_flush(pi);
1710 rte_eth_dev_close(pi);
1712 if (rte_atomic16_cmpset(&(port->port_status),
1713 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1714 printf("Port %d cannot be set to closed\n", pi);
1721 reset_port(portid_t pid)
1725 struct rte_port *port;
1727 if (port_id_is_invalid(pid, ENABLED_WARN))
1730 printf("Resetting ports...\n");
1732 RTE_ETH_FOREACH_DEV(pi) {
1733 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1736 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1737 printf("Please remove port %d from forwarding "
1738 "configuration.\n", pi);
1742 if (port_is_bonding_slave(pi)) {
1743 printf("Please remove port %d from bonded device.\n",
1748 diag = rte_eth_dev_reset(pi);
1751 port->need_reconfig = 1;
1752 port->need_reconfig_queues = 1;
1754 printf("Failed to reset port %d. diag=%d\n", pi, diag);
1762 attach_port(char *identifier)
1765 unsigned int socket_id;
1767 printf("Attaching a new port...\n");
1769 if (identifier == NULL) {
1770 printf("Invalid parameters are specified\n");
	if (rte_eth_dev_attach(identifier, &pi))
		return;
1777 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1778 /* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
1781 reconfig(pi, socket_id);
1782 rte_eth_promiscuous_enable(pi);
1784 nb_ports = rte_eth_dev_count();
1786 ports[pi].port_status = RTE_PORT_STOPPED;
1788 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1793 detach_port(portid_t port_id)
1795 char name[RTE_ETH_NAME_MAX_LEN];
1797 printf("Detaching a port...\n");
1799 if (!port_is_closed(port_id)) {
1800 printf("Please close port first\n");
1804 if (ports[port_id].flow_list)
1805 port_flow_flush(port_id);
1807 if (rte_eth_dev_detach(port_id, name)) {
1808 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1812 nb_ports = rte_eth_dev_count();
1814 printf("Port '%s' is detached. Now total ports is %d\n",
1826 stop_packet_forwarding();
1828 if (ports != NULL) {
1830 RTE_ETH_FOREACH_DEV(pt_id) {
1831 printf("\nShutting down port %d...\n", pt_id);
1837 printf("\nBye...\n");
1840 typedef void (*cmd_func_t)(void);
1841 struct pmd_test_command {
1842 const char *cmd_name;
1843 cmd_func_t cmd_func;
1846 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9 s and print the final status. */
1850 check_all_ports_link_status(uint32_t port_mask)
1852 #define CHECK_INTERVAL 100 /* 100ms */
1853 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1855 uint8_t count, all_ports_up, print_flag = 0;
1856 struct rte_eth_link link;
1858 printf("Checking link statuses...\n");
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
1862 RTE_ETH_FOREACH_DEV(portid) {
1863 if ((port_mask & (1 << portid)) == 0)
1865 memset(&link, 0, sizeof(link));
1866 rte_eth_link_get_nowait(portid, &link);
1867 /* print link status if flag set */
1868 if (print_flag == 1) {
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port %d Link Up. speed %u Mbps - %s\n",
						portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
1879 /* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
1885 /* after finally printing all link status, get out */
		if (print_flag == 1)
			break;
1889 if (all_ports_up == 0) {
1891 rte_delay_ms(CHECK_INTERVAL);
1894 /* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
1905 rmv_event_callback(void *arg)
1907 struct rte_eth_dev *dev;
1908 portid_t port_id = (intptr_t)arg;
1910 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1911 dev = &rte_eth_devices[port_id];
1914 close_port(port_id);
1915 printf("removing device %s\n", dev->device->name);
1916 if (rte_eal_dev_detach(dev->device))
		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
			dev->device->name);
1921 /* This function is used by the interrupt thread */
1923 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1926 static const char * const event_desc[] = {
1927 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1928 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1929 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1930 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1931 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1932 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1933 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1934 [RTE_ETH_EVENT_MAX] = NULL,
1937 RTE_SET_USED(param);
1938 RTE_SET_USED(ret_param);
1940 if (type >= RTE_ETH_EVENT_MAX) {
1941 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1942 port_id, __func__, type);
1944 } else if (event_print_mask & (UINT32_C(1) << type)) {
1945 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1951 case RTE_ETH_EVENT_INTR_RMV:
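		/*
		 * The detach is deferred by 100000 us (100 ms) via an EAL
		 * alarm so that it does not run in the context of the
		 * interrupt callback itself.
		 */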
1952 if (rte_eal_alarm_set(100000,
1953 rmv_event_callback, (void *)(intptr_t)port_id))
1954 fprintf(stderr, "Could not set up deferred device removal\n");
1963 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1967 uint8_t mapping_found = 0;
1969 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1970 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1971 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1972 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1973 tx_queue_stats_mappings[i].queue_id,
1974 tx_queue_stats_mappings[i].stats_counter_id);
1981 port->tx_queue_stats_mapping_enabled = 1;
1986 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1990 uint8_t mapping_found = 0;
1992 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1993 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1994 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1995 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1996 rx_queue_stats_mappings[i].queue_id,
1997 rx_queue_stats_mappings[i].stats_counter_id);
2004 port->rx_queue_stats_mapping_enabled = 1;
2009 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2013 diag = set_tx_queue_stats_mapping_registers(pi, port);
2015 if (diag == -ENOTSUP) {
2016 port->tx_queue_stats_mapping_enabled = 0;
2017 printf("TX queue stats mapping not supported port id=%d\n", pi);
2020 rte_exit(EXIT_FAILURE,
2021 "set_tx_queue_stats_mapping_registers "
2022 "failed for port id=%d diag=%d\n",
2026 diag = set_rx_queue_stats_mapping_registers(pi, port);
2028 if (diag == -ENOTSUP) {
2029 port->rx_queue_stats_mapping_enabled = 0;
2030 printf("RX queue stats mapping not supported port id=%d\n", pi);
2033 rte_exit(EXIT_FAILURE,
2034 "set_rx_queue_stats_mapping_registers "
2035 "failed for port id=%d diag=%d\n",
2041 rxtx_port_config(struct rte_port *port)
2043 port->rx_conf = port->dev_info.default_rxconf;
2044 port->tx_conf = port->dev_info.default_txconf;
2046 /* Check if any RX/TX parameters have been passed */
2047 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2048 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2050 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2051 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2053 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2054 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2056 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2057 port->rx_conf.rx_free_thresh = rx_free_thresh;
2059 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2060 port->rx_conf.rx_drop_en = rx_drop_en;
2062 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2063 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2065 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2066 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2068 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2069 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2071 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2072 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2074 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2075 port->tx_conf.tx_free_thresh = tx_free_thresh;
2077 if (txq_flags != RTE_PMD_PARAM_UNSET)
2078 port->tx_conf.txq_flags = txq_flags;
2082 init_port_config(void)
2085 struct rte_port *port;
2087 RTE_ETH_FOREACH_DEV(pid) {
2089 port->dev_conf.rxmode = rx_mode;
2090 port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}
2099 if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2101 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2103 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2106 rxtx_port_config(port);
2108 rte_eth_macaddr_get(pid, &port->eth_addr);
2110 map_port_queue_stats_mapping_registers(pid, port);
2111 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2112 rte_pmd_ixgbe_bypass_init(pid);
2115 if (lsc_interrupt &&
2116 (rte_eth_devices[pid].data->dev_flags &
2117 RTE_ETH_DEV_INTR_LSC))
2118 port->dev_conf.intr_conf.lsc = 1;
2119 if (rmv_interrupt &&
2120 (rte_eth_devices[pid].data->dev_flags &
2121 RTE_ETH_DEV_INTR_RMV))
2122 port->dev_conf.intr_conf.rmv = 1;
2124 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2125 /* Detect softnic port */
2126 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2127 port->softnic_enable = 1;
2128 memset(&port->softport, 0, sizeof(struct softnic_port));
2130 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2131 port->softport.tm_flag = 1;
2137 void set_port_slave_flag(portid_t slave_pid)
2139 struct rte_port *port;
2141 port = &ports[slave_pid];
2142 port->slave_flag = 1;
2145 void clear_port_slave_flag(portid_t slave_pid)
2147 struct rte_port *port;
2149 port = &ports[slave_pid];
2150 port->slave_flag = 0;
2153 uint8_t port_is_bonding_slave(portid_t slave_pid)
2155 struct rte_port *port;
2157 port = &ports[slave_pid];
2158 return port->slave_flag;
2161 const uint16_t vlan_tags[] = {
2162 0, 1, 2, 3, 4, 5, 6, 7,
2163 8, 9, 10, 11, 12, 13, 14, 15,
2164 16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
2169 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2170 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
2177 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2178 * given above, and the number of traffic classes available for use.
2180 if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2186 /* VMDQ+DCB RX and TX configurations */
2187 vmdq_rx_conf->enable_default_pool = 0;
2188 vmdq_rx_conf->default_pool = 0;
2189 vmdq_rx_conf->nb_queue_pools =
2190 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2191 vmdq_tx_conf->nb_queue_pools =
2192 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
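		/*
		 * Illustrative mapping with num_tcs == ETH_4_TCS: 32 pools,
		 * vlan tag i below steered to pool (i % 32), and each user
		 * priority mapped to traffic class (i % 4).
		 */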
2194 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2195 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2196 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2197 vmdq_rx_conf->pool_map[i].pools =
2198 1 << (i % vmdq_rx_conf->nb_queue_pools);
2200 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2201 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2202 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2205 /* set DCB mode of RX and TX of multiple queues */
2206 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2207 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;
2214 rx_conf->nb_tcs = num_tcs;
2215 tx_conf->nb_tcs = num_tcs;
2217 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2218 rx_conf->dcb_tc[i] = i % num_tcs;
2219 tx_conf->dcb_tc[i] = i % num_tcs;
2221 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2222 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2223 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
2236 init_port_dcb_config(portid_t pid,
2237 enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
2241 struct rte_eth_conf port_conf;
2242 struct rte_port *rte_port;
2246 rte_port = &ports[pid];
2248 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;
2252 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2253 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2256 port_conf.rxmode.hw_vlan_filter = 1;
2259 * Write the configuration into the device.
2260 * Set the numbers of RX & TX queues to 0, so
2261 * the RX & TX queues will not be setup.
2263 rte_eth_dev_configure(pid, 0, 0, &port_conf);
2265 rte_eth_dev_info_get(pid, &rte_port->dev_info);
	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue ids of the vmdq pools start after the pf queues.
	 */
2270 if (dcb_mode == DCB_VT_ENABLED &&
2271 rte_port->dev_info.vmdq_pool_base > 0) {
2272 printf("VMDQ_DCB multi-queue mode is nonsensical"
2273 " for port %d.", pid);
	/* Assume the ports in testpmd have the same dcb capability
	 * and the same number of rxq and txq in dcb mode
	 */
2280 if (dcb_mode == DCB_VT_ENABLED) {
2281 if (rte_port->dev_info.max_vfs > 0) {
2282 nb_rxq = rte_port->dev_info.nb_rx_queues;
2283 nb_txq = rte_port->dev_info.nb_tx_queues;
2285 nb_rxq = rte_port->dev_info.max_rx_queues;
2286 nb_txq = rte_port->dev_info.max_tx_queues;
		/* if vt is disabled, use all pf queues */
2290 if (rte_port->dev_info.vmdq_pool_base == 0) {
2291 nb_rxq = rte_port->dev_info.max_rx_queues;
2292 nb_txq = rte_port->dev_info.max_tx_queues;
2294 nb_rxq = (queueid_t)num_tcs;
2295 nb_txq = (queueid_t)num_tcs;
2299 rx_free_thresh = 64;
2301 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2303 rxtx_port_config(rte_port);
2305 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2306 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2307 rx_vft_set(pid, vlan_tags[i], 1);
2309 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2310 map_port_queue_stats_mapping_registers(pid, rte_port);
2312 rte_port->dcb_flag = 1;
2320 /* Configuration of Ethernet ports. */
2321 ports = rte_zmalloc("testpmd: ports",
2322 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2323 RTE_CACHE_LINE_SIZE);
2324 if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
2342 const char clr[] = { 27, '[', '2', 'J', '\0' };
2343 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2345 /* Clear screen and move to top left */
2346 printf("%s%s", clr, top_left);
2348 printf("\nPort statistics ====================================");
2349 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2350 nic_stats_display(fwd_ports_ids[i]);
2354 signal_handler(int signum)
2356 if (signum == SIGINT || signum == SIGTERM) {
2357 printf("\nSignal %d received, preparing to exit...\n",
2359 #ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
2363 #ifdef RTE_LIBRTE_LATENCY_STATS
2364 rte_latencystats_uninit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
2369 /* exit with the expected status */
2370 signal(signum, SIG_DFL);
2371 kill(getpid(), signum);
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
2381 signal(SIGINT, signal_handler);
2382 signal(SIGTERM, signal_handler);
	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");
2388 testpmd_logtype = rte_log_register("testpmd");
2389 if (testpmd_logtype < 0)
2390 rte_panic("Cannot register log type");
2391 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2393 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2394 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2398 #ifdef RTE_LIBRTE_PDUMP
2399 /* initialize packet capture framework */
2400 rte_pdump_init(NULL);
	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");
2415 /* Bitrate/latency stats disabled by default */
2416 #ifdef RTE_LIBRTE_BITRATE
2417 bitrate_enabled = 0;
2419 #ifdef RTE_LIBRTE_LATENCY_STATS
2420 latencystats_enabled = 0;
2426 launch_args_parse(argc, argv);
2428 if (tx_first && interactive)
2429 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2430 "interactive mode.\n");
2432 if (tx_first && lsc_interrupt) {
2433 printf("Warning: lsc_interrupt needs to be off when "
2434 " using tx_first. Disabling.\n");
2438 if (!nb_rxq && !nb_txq)
2439 printf("Warning: Either rx or tx queues should be non-zero\n");
2441 if (nb_rxq > 1 && nb_rxq > nb_txq)
2442 printf("Warning: nb_rxq=%d enables RSS configuration, "
2443 "but nb_txq=%d will prevent to fully test it.\n",
2447 if (start_port(RTE_PORT_ALL) != 0)
2448 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2450 /* set all ports to promiscuous mode by default */
2451 RTE_ETH_FOREACH_DEV(port_id)
2452 rte_eth_promiscuous_enable(port_id);
2454 /* Init metrics library */
2455 rte_metrics_init(rte_socket_id());
2457 #ifdef RTE_LIBRTE_LATENCY_STATS
2458 if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
					" returned error %d\n", ret);
2463 printf("Latencystats running on lcore %d\n",
2464 latencystats_lcore_id);
2468 /* Setup bitrate stats */
2469 #ifdef RTE_LIBRTE_BITRATE
2470 if (bitrate_enabled != 0) {
2471 bitrate_data = rte_stats_bitrate_create();
2472 if (bitrate_data == NULL)
2473 rte_exit(EXIT_FAILURE,
2474 "Could not allocate bitrate data.\n");
2475 rte_stats_bitrate_reg(bitrate_data);
2479 #ifdef RTE_LIBRTE_CMDLINE
2480 if (strlen(cmdline_filename) != 0)
2481 cmdline_read_from_file(cmdline_filename);
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
2498 printf("No commandline core given, start packet forwarding\n");
2499 start_packet_forwarding(tx_first);
2500 if (stats_period != 0) {
2501 uint64_t prev_time = 0, cur_time, diff_time = 0;
2502 uint64_t timer_period;
2504 /* Convert to number of cycles */
2505 timer_period = stats_period * rte_get_timer_hz();
2507 while (f_quit == 0) {
2508 cur_time = rte_get_timer_cycles();
2509 diff_time += cur_time - prev_time;
				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}
2522 printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}