/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;
/*
 * Record the Ethernet addresses of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
				      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */
/*
 * When running in a container, a process started with the 'stats-period'
 * option cannot easily be terminated from outside. Set a flag to exit the
 * stats-period loop once SIGINT/SIGTERM has been received.
 */
uint8_t f_quit;
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
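/*
 * For example (illustrative values only): with tx_pkt_nb_segs = 2 and
 * tx_pkt_seg_lengths = {64, TXONLY_DEF_PACKET_LEN - 64}, the "txonly"
 * engine builds each packet from two chained mbuf segments whose data
 * lengths sum to tx_pkt_length. These values are normally changed at
 * run time with the "set txpkts" command.
 */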
enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is currently under test. */
uint8_t dcb_test = 0;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
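/*
 * These defaults can be overridden at startup with the --rxd and --txd
 * command-line options.
 */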
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
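/*
 * In the default paired topology, packets received on port 0 are forwarded
 * to port 1 and vice versa (2 <-> 3, 4 <-> 5, ...); the "chained" and "loop"
 * topologies selectable with --port-topology route the traffic differently.
 */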
/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
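/*
 * Each bit of event_print_mask selects one rte_eth_event_type value;
 * eth_event_callback() below prints an event only when
 * (event_print_mask & (UINT32_C(1) << type)) is set.
 */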
/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS
/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;
#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
	.hw_timestamp   = 0, /**< HW timestamp disabled. */
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];
#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
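/* Default GSO segment size: ETHER_MAX_LEN (1518) minus ETHER_CRC_LEN (4), i.e. 1514 bytes. */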
/*
 * Helper function to check whether a socket id is new (not yet discovered).
 * If new, return a positive value; otherwise return zero.
 */
static int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
438 * Setup default configuration.
441 set_default_fwd_lcores_config(void)
445 unsigned int sock_num;
448 for (i = 0; i < RTE_MAX_LCORE; i++) {
449 sock_num = rte_lcore_to_socket_id(i);
450 if (new_socket_id(sock_num)) {
451 if (num_sockets >= RTE_MAX_NUMA_NODES) {
452 rte_exit(EXIT_FAILURE,
453 "Total sockets greater than %u\n",
456 socket_ids[num_sockets++] = sock_num;
458 if (!rte_lcore_is_enabled(i))
460 if (i == rte_get_master_lcore())
462 fwd_lcores_cpuids[nb_lc++] = i;
464 nb_lcores = (lcoreid_t) nb_lc;
465 nb_cfg_lcores = nb_lcores;
470 set_def_peer_eth_addrs(void)
474 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
475 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
476 peer_eth_addrs[i].addr_bytes[5] = i;
481 set_default_fwd_ports_config(void)
486 RTE_ETH_FOREACH_DEV(pt_id)
487 fwd_ports_ids[i++] = pt_id;
489 nb_cfg_ports = nb_ports;
490 nb_fwd_ports = nb_ports;
494 set_def_fwd_config(void)
496 set_default_fwd_lcores_config();
497 set_def_peer_eth_addrs();
498 set_default_fwd_ports_config();
502 * Configuration initialisation done once at init time.
505 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
506 unsigned int socket_id)
508 char pool_name[RTE_MEMPOOL_NAMESIZE];
509 struct rte_mempool *rte_mp = NULL;
512 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
513 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
516 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
517 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
520 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
521 mb_size, (unsigned) mb_mempool_cache,
522 sizeof(struct rte_pktmbuf_pool_private),
527 if (rte_mempool_populate_anon(rte_mp) == 0) {
528 rte_mempool_free(rte_mp);
532 rte_pktmbuf_pool_init(rte_mp, NULL);
533 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
535 /* wrapper to rte_mempool_create() */
536 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
537 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
541 if (rte_mp == NULL) {
542 rte_exit(EXIT_FAILURE,
543 "Creation of mbuf pool for socket %u failed: %s\n",
544 socket_id, rte_strerror(rte_errno));
545 } else if (verbose_level > 0) {
546 rte_mempool_dump(stdout, rte_mp);
/*
 * Check whether a given socket id is valid in NUMA mode.
 * If valid, return 0; otherwise return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
557 static int warning_once = 0;
559 if (new_socket_id(socket_id)) {
560 if (!warning_once && numa_support)
561 printf("Warning: NUMA should be configured manually by"
562 " using --port-numa-config and"
563 " --ring-numa-config parameters along with"
575 struct rte_port *port;
576 struct rte_mempool *mbp;
577 unsigned int nb_mbuf_per_pool;
579 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
580 struct rte_gro_param gro_param;
583 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
586 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
587 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
588 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
591 /* Configuration of logical cores. */
592 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
593 sizeof(struct fwd_lcore *) * nb_lcores,
594 RTE_CACHE_LINE_SIZE);
595 if (fwd_lcores == NULL) {
596 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
597 "failed\n", nb_lcores);
599 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
600 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
601 sizeof(struct fwd_lcore),
602 RTE_CACHE_LINE_SIZE);
603 if (fwd_lcores[lc_id] == NULL) {
604 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
607 fwd_lcores[lc_id]->cpuid_idx = lc_id;
610 RTE_ETH_FOREACH_DEV(pid) {
612 rte_eth_dev_info_get(pid, &port->dev_info);
615 if (port_numa[pid] != NUMA_NO_CONFIG)
616 port_per_socket[port_numa[pid]]++;
618 uint32_t socket_id = rte_eth_dev_socket_id(pid);
620 /* if socket_id is invalid, set to 0 */
621 if (check_socket_id(socket_id) < 0)
623 port_per_socket[socket_id]++;
627 /* set flag to initialize port/queue */
628 port->need_reconfig = 1;
629 port->need_reconfig_queues = 1;
	/*
	 * Create mbuf pools.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
	 * and nb_txd can still be configured at run time.
	 */
641 if (param_total_num_mbufs)
642 nb_mbuf_per_pool = param_total_num_mbufs;
644 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
645 (nb_lcores * mb_mempool_cache) +
646 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
647 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
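	/*
	 * The value above is a worst-case estimate: enough mbufs to fill the
	 * largest supported RX and TX rings plus one burst in flight and the
	 * per-lcore mempool caches, scaled by the maximum number of ports.
	 */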
653 for (i = 0; i < num_sockets; i++)
654 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
657 if (socket_num == UMA_NO_CONFIG)
658 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
660 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
666 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
667 DEV_TX_OFFLOAD_GRE_TNL_TSO;
669 * Records which Mbuf pool to use by each logical core, if needed.
671 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
672 mbp = mbuf_pool_find(
673 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
676 mbp = mbuf_pool_find(0);
677 fwd_lcores[lc_id]->mbp = mbp;
678 /* initialize GSO context */
679 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
680 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
681 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
682 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
684 fwd_lcores[lc_id]->gso_ctx.flag = 0;
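		/*
		 * Each lcore reuses its own mbuf pool for both the direct and
		 * the indirect pools of its GSO context.
		 */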
687 /* Configuration of packet forwarding streams. */
688 if (init_fwd_streams() < 0)
689 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
693 /* create a gro context for each lcore */
694 gro_param.gro_types = RTE_GRO_TCP_IPV4;
695 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
696 gro_param.max_item_per_flow = MAX_PKT_BURST;
697 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
698 gro_param.socket_id = rte_lcore_to_socket_id(
699 fwd_lcores_cpuids[lc_id]);
700 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
701 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
702 rte_exit(EXIT_FAILURE,
703 "rte_gro_ctx_create() failed\n");
710 reconfig(portid_t new_port_id, unsigned socket_id)
712 struct rte_port *port;
714 /* Reconfiguration of Ethernet ports. */
715 port = &ports[new_port_id];
716 rte_eth_dev_info_get(new_port_id, &port->dev_info);
718 /* set flag to initialize port/queue */
719 port->need_reconfig = 1;
720 port->need_reconfig_queues = 1;
721 port->socket_id = socket_id;
728 init_fwd_streams(void)
731 struct rte_port *port;
732 streamid_t sm_id, nb_fwd_streams_new;
735 /* set socket id according to numa or not */
736 RTE_ETH_FOREACH_DEV(pid) {
738 if (nb_rxq > port->dev_info.max_rx_queues) {
739 printf("Fail: nb_rxq(%d) is greater than "
740 "max_rx_queues(%d)\n", nb_rxq,
741 port->dev_info.max_rx_queues);
744 if (nb_txq > port->dev_info.max_tx_queues) {
745 printf("Fail: nb_txq(%d) is greater than "
746 "max_tx_queues(%d)\n", nb_txq,
747 port->dev_info.max_tx_queues);
751 if (port_numa[pid] != NUMA_NO_CONFIG)
752 port->socket_id = port_numa[pid];
754 port->socket_id = rte_eth_dev_socket_id(pid);
756 /* if socket_id is invalid, set to 0 */
757 if (check_socket_id(port->socket_id) < 0)
762 if (socket_num == UMA_NO_CONFIG)
765 port->socket_id = socket_num;
769 q = RTE_MAX(nb_rxq, nb_txq);
771 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
774 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
775 if (nb_fwd_streams_new == nb_fwd_streams)
778 if (fwd_streams != NULL) {
779 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
780 if (fwd_streams[sm_id] == NULL)
782 rte_free(fwd_streams[sm_id]);
783 fwd_streams[sm_id] = NULL;
785 rte_free(fwd_streams);
790 nb_fwd_streams = nb_fwd_streams_new;
791 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
792 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
793 if (fwd_streams == NULL)
794 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
795 "failed\n", nb_fwd_streams);
797 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
798 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
799 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
800 if (fwd_streams[sm_id] == NULL)
801 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
808 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
810 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
812 unsigned int total_burst;
813 unsigned int nb_burst;
814 unsigned int burst_stats[3];
815 uint16_t pktnb_stats[3];
817 int burst_percent[3];
820 * First compute the total number of packet bursts and the
821 * two highest numbers of bursts of the same number of packets.
824 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
825 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
826 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
827 nb_burst = pbs->pkt_burst_spread[nb_pkt];
830 total_burst += nb_burst;
831 if (nb_burst > burst_stats[0]) {
832 burst_stats[1] = burst_stats[0];
833 pktnb_stats[1] = pktnb_stats[0];
834 burst_stats[0] = nb_burst;
835 pktnb_stats[0] = nb_pkt;
838 if (total_burst == 0)
840 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
841 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
842 burst_percent[0], (int) pktnb_stats[0]);
843 if (burst_stats[0] == total_burst) {
847 if (burst_stats[0] + burst_stats[1] == total_burst) {
848 printf(" + %d%% of %d pkts]\n",
849 100 - burst_percent[0], pktnb_stats[1]);
852 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
853 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
854 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
855 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
858 printf(" + %d%% of %d pkts + %d%% of others]\n",
859 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
861 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
864 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
866 struct rte_port *port;
869 static const char *fwd_stats_border = "----------------------";
871 port = &ports[port_id];
872 printf("\n %s Forward statistics for port %-2d %s\n",
873 fwd_stats_border, port_id, fwd_stats_border);
875 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
876 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
878 stats->ipackets, stats->imissed,
879 (uint64_t) (stats->ipackets + stats->imissed));
881 if (cur_fwd_eng == &csum_fwd_engine)
882 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
883 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
884 if ((stats->ierrors + stats->rx_nombuf) > 0) {
885 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
886 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
889 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
891 stats->opackets, port->tx_dropped,
892 (uint64_t) (stats->opackets + port->tx_dropped));
895 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
897 stats->ipackets, stats->imissed,
898 (uint64_t) (stats->ipackets + stats->imissed));
900 if (cur_fwd_eng == &csum_fwd_engine)
901 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
902 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
903 if ((stats->ierrors + stats->rx_nombuf) > 0) {
904 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
905 printf(" RX-nombufs: %14"PRIu64"\n",
909 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
911 stats->opackets, port->tx_dropped,
912 (uint64_t) (stats->opackets + port->tx_dropped));
915 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
917 pkt_burst_stats_display("RX",
918 &port->rx_stream->rx_burst_stats);
920 pkt_burst_stats_display("TX",
921 &port->tx_stream->tx_burst_stats);
924 if (port->rx_queue_stats_mapping_enabled) {
926 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
927 printf(" Stats reg %2d RX-packets:%14"PRIu64
928 " RX-errors:%14"PRIu64
929 " RX-bytes:%14"PRIu64"\n",
930 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
934 if (port->tx_queue_stats_mapping_enabled) {
935 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
936 printf(" Stats reg %2d TX-packets:%14"PRIu64
937 " TX-bytes:%14"PRIu64"\n",
938 i, stats->q_opackets[i], stats->q_obytes[i]);
942 printf(" %s--------------------------------%s\n",
943 fwd_stats_border, fwd_stats_border);
947 fwd_stream_stats_display(streamid_t stream_id)
949 struct fwd_stream *fs;
950 static const char *fwd_top_stats_border = "-------";
952 fs = fwd_streams[stream_id];
953 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
954 (fs->fwd_dropped == 0))
956 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
957 "TX Port=%2d/Queue=%2d %s\n",
958 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
959 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
960 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
961 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
963 /* if checksum mode */
964 if (cur_fwd_eng == &csum_fwd_engine) {
965 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
966 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
969 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
970 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
971 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
976 flush_fwd_rx_queues(void)
978 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
985 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
986 uint64_t timer_period;
988 /* convert to number of cycles */
989 timer_period = rte_get_timer_hz(); /* 1 second timeout */
991 for (j = 0; j < 2; j++) {
992 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
993 for (rxq = 0; rxq < nb_rxq; rxq++) {
994 port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do/while loop
				 * below if rte_eth_rx_burst() keeps returning
				 * a nonzero number of packets. Use a timer to
				 * break out of the loop after a 1-second
				 * timeout.
				 */
1001 prev_tsc = rte_rdtsc();
1003 nb_rx = rte_eth_rx_burst(port_id, rxq,
1004 pkts_burst, MAX_PKT_BURST);
1005 for (i = 0; i < nb_rx; i++)
1006 rte_pktmbuf_free(pkts_burst[i]);
1008 cur_tsc = rte_rdtsc();
1009 diff_tsc = cur_tsc - prev_tsc;
1010 timer_tsc += diff_tsc;
1011 } while ((nb_rx > 0) &&
1012 (timer_tsc < timer_period));
1016 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
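		/*
		 * The enclosing loop runs twice: the RX queues are drained
		 * once, testpmd waits 10 ms for late packets to arrive, and
		 * then drains them again.
		 */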
1021 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1023 struct fwd_stream **fsm;
1026 #ifdef RTE_LIBRTE_BITRATE
1027 uint64_t tics_per_1sec;
1028 uint64_t tics_datum;
1029 uint64_t tics_current;
1030 uint8_t idx_port, cnt_ports;
1032 cnt_ports = rte_eth_dev_count();
1033 tics_datum = rte_rdtsc();
1034 tics_per_1sec = rte_get_timer_hz();
1036 fsm = &fwd_streams[fc->stream_idx];
1037 nb_fs = fc->stream_nb;
1039 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1040 (*pkt_fwd)(fsm[sm_id]);
1041 #ifdef RTE_LIBRTE_BITRATE
1042 if (bitrate_enabled != 0 &&
1043 bitrate_lcore_id == rte_lcore_id()) {
1044 tics_current = rte_rdtsc();
1045 if (tics_current - tics_datum >= tics_per_1sec) {
1046 /* Periodic bitrate calculation */
1048 idx_port < cnt_ports;
1050 rte_stats_bitrate_calc(bitrate_data,
1052 tics_datum = tics_current;
1056 #ifdef RTE_LIBRTE_LATENCY_STATS
1057 if (latencystats_enabled != 0 &&
1058 latencystats_lcore_id == rte_lcore_id())
1059 rte_latencystats_update();
1062 } while (! fc->stopped);
1066 start_pkt_forward_on_core(void *fwd_arg)
1068 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1069 cur_fwd_config.fwd_eng->packet_fwd);
1074 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1075 * Used to start communication flows in network loopback test configurations.
1078 run_one_txonly_burst_on_core(void *fwd_arg)
1080 struct fwd_lcore *fwd_lc;
1081 struct fwd_lcore tmp_lcore;
1083 fwd_lc = (struct fwd_lcore *) fwd_arg;
1084 tmp_lcore = *fwd_lc;
1085 tmp_lcore.stopped = 1;
1086 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
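	/*
	 * tmp_lcore.stopped is pre-set to 1 above, so run_pkt_fwd_on_lcore()
	 * makes exactly one pass over the streams of this lcore, i.e. a
	 * single TX-only burst per stream.
	 */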
1091 * Launch packet forwarding:
1092 * - Setup per-port forwarding context.
1093 * - launch logical cores with their forwarding configuration.
1096 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1098 port_fwd_begin_t port_fwd_begin;
1103 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1104 if (port_fwd_begin != NULL) {
1105 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1106 (*port_fwd_begin)(fwd_ports_ids[i]);
1108 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1109 lc_id = fwd_lcores_cpuids[i];
1110 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1111 fwd_lcores[i]->stopped = 0;
1112 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1113 fwd_lcores[i], lc_id);
1115 printf("launch lcore %u failed - diag=%d\n",
1122 * Launch packet forwarding configuration.
1125 start_packet_forwarding(int with_tx_first)
1127 port_fwd_begin_t port_fwd_begin;
1128 port_fwd_end_t port_fwd_end;
1129 struct rte_port *port;
1134 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1135 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1137 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1138 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1140 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1141 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1142 (!nb_rxq || !nb_txq))
1143 rte_exit(EXIT_FAILURE,
1144 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1145 cur_fwd_eng->fwd_mode_name);
1147 if (all_ports_started() == 0) {
1148 printf("Not all ports were started\n");
1151 if (test_done == 0) {
1152 printf("Packet forwarding already started\n");
1156 if (init_fwd_streams() < 0) {
1157 printf("Fail from init_fwd_streams()\n");
1162 for (i = 0; i < nb_fwd_ports; i++) {
1163 pt_id = fwd_ports_ids[i];
1164 port = &ports[pt_id];
1165 if (!port->dcb_flag) {
1166 printf("In DCB mode, all forwarding ports must "
1167 "be configured in this mode.\n");
1171 if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
1180 flush_fwd_rx_queues();
1183 pkt_fwd_config_display(&cur_fwd_config);
1184 rxtx_config_display();
1186 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1187 pt_id = fwd_ports_ids[i];
1188 port = &ports[pt_id];
1189 rte_eth_stats_get(pt_id, &port->stats);
1190 port->tx_dropped = 0;
1192 map_port_queue_stats_mapping_registers(pt_id, port);
1194 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1195 fwd_streams[sm_id]->rx_packets = 0;
1196 fwd_streams[sm_id]->tx_packets = 0;
1197 fwd_streams[sm_id]->fwd_dropped = 0;
1198 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1199 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1201 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1202 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1203 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1204 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1205 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1207 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1208 fwd_streams[sm_id]->core_cycles = 0;
1211 if (with_tx_first) {
1212 port_fwd_begin = tx_only_engine.port_fwd_begin;
1213 if (port_fwd_begin != NULL) {
1214 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1215 (*port_fwd_begin)(fwd_ports_ids[i]);
1217 while (with_tx_first--) {
1218 launch_packet_forwarding(
1219 run_one_txonly_burst_on_core);
1220 rte_eal_mp_wait_lcore();
1222 port_fwd_end = tx_only_engine.port_fwd_end;
1223 if (port_fwd_end != NULL) {
1224 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1225 (*port_fwd_end)(fwd_ports_ids[i]);
1228 launch_packet_forwarding(start_pkt_forward_on_core);
1232 stop_packet_forwarding(void)
1234 struct rte_eth_stats stats;
1235 struct rte_port *port;
1236 port_fwd_end_t port_fwd_end;
1241 uint64_t total_recv;
1242 uint64_t total_xmit;
1243 uint64_t total_rx_dropped;
1244 uint64_t total_tx_dropped;
1245 uint64_t total_rx_nombuf;
1246 uint64_t tx_dropped;
1247 uint64_t rx_bad_ip_csum;
1248 uint64_t rx_bad_l4_csum;
1249 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1250 uint64_t fwd_cycles;
1253 static const char *acc_stats_border = "+++++++++++++++";
1256 printf("Packet forwarding not started\n");
1259 printf("Telling cores to stop...");
1260 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1261 fwd_lcores[lc_id]->stopped = 1;
1262 printf("\nWaiting for lcores to finish...\n");
1263 rte_eal_mp_wait_lcore();
1264 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1265 if (port_fwd_end != NULL) {
1266 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1267 pt_id = fwd_ports_ids[i];
1268 (*port_fwd_end)(pt_id);
1271 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1274 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1275 if (cur_fwd_config.nb_fwd_streams >
1276 cur_fwd_config.nb_fwd_ports) {
1277 fwd_stream_stats_display(sm_id);
1278 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1279 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1281 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1283 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1286 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1287 tx_dropped = (uint64_t) (tx_dropped +
1288 fwd_streams[sm_id]->fwd_dropped);
1289 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1292 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1293 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1294 fwd_streams[sm_id]->rx_bad_ip_csum);
1295 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1299 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1300 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1301 fwd_streams[sm_id]->rx_bad_l4_csum);
1302 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1305 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1306 fwd_cycles = (uint64_t) (fwd_cycles +
1307 fwd_streams[sm_id]->core_cycles);
1312 total_rx_dropped = 0;
1313 total_tx_dropped = 0;
1314 total_rx_nombuf = 0;
1315 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1316 pt_id = fwd_ports_ids[i];
1318 port = &ports[pt_id];
1319 rte_eth_stats_get(pt_id, &stats);
1320 stats.ipackets -= port->stats.ipackets;
1321 port->stats.ipackets = 0;
1322 stats.opackets -= port->stats.opackets;
1323 port->stats.opackets = 0;
1324 stats.ibytes -= port->stats.ibytes;
1325 port->stats.ibytes = 0;
1326 stats.obytes -= port->stats.obytes;
1327 port->stats.obytes = 0;
1328 stats.imissed -= port->stats.imissed;
1329 port->stats.imissed = 0;
1330 stats.oerrors -= port->stats.oerrors;
1331 port->stats.oerrors = 0;
1332 stats.rx_nombuf -= port->stats.rx_nombuf;
1333 port->stats.rx_nombuf = 0;
1335 total_recv += stats.ipackets;
1336 total_xmit += stats.opackets;
1337 total_rx_dropped += stats.imissed;
1338 total_tx_dropped += port->tx_dropped;
1339 total_rx_nombuf += stats.rx_nombuf;
1341 fwd_port_stats_display(pt_id, &stats);
1344 printf("\n %s Accumulated forward statistics for all ports"
1346 acc_stats_border, acc_stats_border);
1347 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1349 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1351 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1352 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1353 if (total_rx_nombuf > 0)
1354 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1355 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1357 acc_stats_border, acc_stats_border);
1358 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1360 printf("\n CPU cycles/packet=%u (total cycles="
1361 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1362 (unsigned int)(fwd_cycles / total_recv),
1363 fwd_cycles, total_recv);
1365 printf("\nDone.\n");
1370 dev_set_link_up(portid_t pid)
1372 if (rte_eth_dev_set_link_up(pid) < 0)
1373 printf("\nSet link up fail.\n");
1377 dev_set_link_down(portid_t pid)
1379 if (rte_eth_dev_set_link_down(pid) < 0)
1380 printf("\nSet link down fail.\n");
1384 all_ports_started(void)
1387 struct rte_port *port;
1389 RTE_ETH_FOREACH_DEV(pi) {
1391 /* Check if there is a port which is not started */
1392 if ((port->port_status != RTE_PORT_STARTED) &&
1393 (port->slave_flag == 0))
1397 /* No port is not started */
1402 all_ports_stopped(void)
1405 struct rte_port *port;
1407 RTE_ETH_FOREACH_DEV(pi) {
1409 if ((port->port_status != RTE_PORT_STOPPED) &&
1410 (port->slave_flag == 0))
1418 port_is_started(portid_t port_id)
1420 if (port_id_is_invalid(port_id, ENABLED_WARN))
1423 if (ports[port_id].port_status != RTE_PORT_STARTED)
1430 port_is_closed(portid_t port_id)
1432 if (port_id_is_invalid(port_id, ENABLED_WARN))
1435 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1442 start_port(portid_t pid)
1444 int diag, need_check_link_status = -1;
1447 struct rte_port *port;
1448 struct ether_addr mac_addr;
1449 enum rte_eth_event_type event_type;
1451 if (port_id_is_invalid(pid, ENABLED_WARN))
1456 RTE_ETH_FOREACH_DEV(pi) {
1457 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1460 need_check_link_status = 0;
1462 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1463 RTE_PORT_HANDLING) == 0) {
1464 printf("Port %d is now not stopped\n", pi);
1468 if (port->need_reconfig > 0) {
1469 port->need_reconfig = 0;
1471 if (flow_isolate_all) {
1472 int ret = port_flow_isolate(pi, 1);
1474 printf("Failed to apply isolated"
1475 " mode on port %d\n", pi);
1480 printf("Configuring Port %d (socket %u)\n", pi,
1482 /* configure port */
1483 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1486 if (rte_atomic16_cmpset(&(port->port_status),
1487 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1488 printf("Port %d can not be set back "
1489 "to stopped\n", pi);
1490 printf("Fail to configure port %d\n", pi);
1491 /* try to reconfigure port next time */
1492 port->need_reconfig = 1;
1496 if (port->need_reconfig_queues > 0) {
1497 port->need_reconfig_queues = 0;
1498 /* setup tx queues */
1499 for (qi = 0; qi < nb_txq; qi++) {
1500 if ((numa_support) &&
1501 (txring_numa[pi] != NUMA_NO_CONFIG))
1502 diag = rte_eth_tx_queue_setup(pi, qi,
1503 nb_txd,txring_numa[pi],
1506 diag = rte_eth_tx_queue_setup(pi, qi,
1507 nb_txd,port->socket_id,
1513 /* Fail to setup tx queue, return */
1514 if (rte_atomic16_cmpset(&(port->port_status),
1516 RTE_PORT_STOPPED) == 0)
1517 printf("Port %d can not be set back "
1518 "to stopped\n", pi);
1519 printf("Fail to configure port %d tx queues\n", pi);
1520 /* try to reconfigure queues next time */
1521 port->need_reconfig_queues = 1;
1524 /* setup rx queues */
1525 for (qi = 0; qi < nb_rxq; qi++) {
1526 if ((numa_support) &&
1527 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1528 struct rte_mempool * mp =
1529 mbuf_pool_find(rxring_numa[pi]);
1531 printf("Failed to setup RX queue:"
1532 "No mempool allocation"
1533 " on the socket %d\n",
1538 diag = rte_eth_rx_queue_setup(pi, qi,
1539 nb_rxd,rxring_numa[pi],
1540 &(port->rx_conf),mp);
1542 struct rte_mempool *mp =
1543 mbuf_pool_find(port->socket_id);
1545 printf("Failed to setup RX queue:"
1546 "No mempool allocation"
1547 " on the socket %d\n",
1551 diag = rte_eth_rx_queue_setup(pi, qi,
1552 nb_rxd,port->socket_id,
1553 &(port->rx_conf), mp);
1558 /* Fail to setup rx queue, return */
1559 if (rte_atomic16_cmpset(&(port->port_status),
1561 RTE_PORT_STOPPED) == 0)
1562 printf("Port %d can not be set back "
1563 "to stopped\n", pi);
1564 printf("Fail to configure port %d rx queues\n", pi);
1565 /* try to reconfigure queues next time */
1566 port->need_reconfig_queues = 1;
1571 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1572 event_type < RTE_ETH_EVENT_MAX;
1574 diag = rte_eth_dev_callback_register(pi,
				printf("Failed to setup event callback for event %d\n",
1586 if (rte_eth_dev_start(pi) < 0) {
1587 printf("Fail to start port %d\n", pi);
1589 /* Fail to setup rx queue, return */
1590 if (rte_atomic16_cmpset(&(port->port_status),
1591 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1592 printf("Port %d can not be set back to "
1597 if (rte_atomic16_cmpset(&(port->port_status),
1598 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1599 printf("Port %d can not be set into started\n", pi);
1601 rte_eth_macaddr_get(pi, &mac_addr);
1602 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1603 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1604 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1605 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1607 /* at least one port started, need checking link status */
1608 need_check_link_status = 1;
1611 if (need_check_link_status == 1 && !no_link_check)
1612 check_all_ports_link_status(RTE_PORT_ALL);
1613 else if (need_check_link_status == 0)
1614 printf("Please stop the ports first\n");
1621 stop_port(portid_t pid)
1624 struct rte_port *port;
1625 int need_check_link_status = 0;
1632 if (port_id_is_invalid(pid, ENABLED_WARN))
1635 printf("Stopping ports...\n");
1637 RTE_ETH_FOREACH_DEV(pi) {
1638 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1641 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1642 printf("Please remove port %d from forwarding configuration.\n", pi);
1646 if (port_is_bonding_slave(pi)) {
1647 printf("Please remove port %d from bonded device.\n", pi);
1652 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1653 RTE_PORT_HANDLING) == 0)
1656 rte_eth_dev_stop(pi);
1658 if (rte_atomic16_cmpset(&(port->port_status),
1659 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1660 printf("Port %d can not be set into stopped\n", pi);
1661 need_check_link_status = 1;
1663 if (need_check_link_status && !no_link_check)
1664 check_all_ports_link_status(RTE_PORT_ALL);
1670 close_port(portid_t pid)
1673 struct rte_port *port;
1675 if (port_id_is_invalid(pid, ENABLED_WARN))
1678 printf("Closing ports...\n");
1680 RTE_ETH_FOREACH_DEV(pi) {
1681 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1684 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1685 printf("Please remove port %d from forwarding configuration.\n", pi);
1689 if (port_is_bonding_slave(pi)) {
1690 printf("Please remove port %d from bonded device.\n", pi);
1695 if (rte_atomic16_cmpset(&(port->port_status),
1696 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1697 printf("Port %d is already closed\n", pi);
1701 if (rte_atomic16_cmpset(&(port->port_status),
1702 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1703 printf("Port %d is now not stopped\n", pi);
1707 if (port->flow_list)
1708 port_flow_flush(pi);
1709 rte_eth_dev_close(pi);
1711 if (rte_atomic16_cmpset(&(port->port_status),
1712 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1713 printf("Port %d cannot be set to closed\n", pi);
1720 reset_port(portid_t pid)
1724 struct rte_port *port;
1726 if (port_id_is_invalid(pid, ENABLED_WARN))
1729 printf("Resetting ports...\n");
1731 RTE_ETH_FOREACH_DEV(pi) {
1732 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1735 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1736 printf("Please remove port %d from forwarding "
1737 "configuration.\n", pi);
1741 if (port_is_bonding_slave(pi)) {
1742 printf("Please remove port %d from bonded device.\n",
1747 diag = rte_eth_dev_reset(pi);
1750 port->need_reconfig = 1;
1751 port->need_reconfig_queues = 1;
1753 printf("Failed to reset port %d. diag=%d\n", pi, diag);
1761 attach_port(char *identifier)
1764 unsigned int socket_id;
1766 printf("Attaching a new port...\n");
1768 if (identifier == NULL) {
1769 printf("Invalid parameters are specified\n");
1773 if (rte_eth_dev_attach(identifier, &pi))
1776 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1777 /* if socket_id is invalid, set to 0 */
1778 if (check_socket_id(socket_id) < 0)
1780 reconfig(pi, socket_id);
1781 rte_eth_promiscuous_enable(pi);
1783 nb_ports = rte_eth_dev_count();
1785 ports[pi].port_status = RTE_PORT_STOPPED;
1787 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1792 detach_port(portid_t port_id)
1794 char name[RTE_ETH_NAME_MAX_LEN];
1796 printf("Detaching a port...\n");
1798 if (!port_is_closed(port_id)) {
1799 printf("Please close port first\n");
1803 if (ports[port_id].flow_list)
1804 port_flow_flush(port_id);
1806 if (rte_eth_dev_detach(port_id, name)) {
1807 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1811 nb_ports = rte_eth_dev_count();
1813 printf("Port '%s' is detached. Now total ports is %d\n",
1825 stop_packet_forwarding();
1827 if (ports != NULL) {
1829 RTE_ETH_FOREACH_DEV(pt_id) {
1830 printf("\nShutting down port %d...\n", pt_id);
1836 printf("\nBye...\n");
1839 typedef void (*cmd_func_t)(void);
1840 struct pmd_test_command {
1841 const char *cmd_name;
1842 cmd_func_t cmd_func;
1845 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9 s, and print the final status. */
1849 check_all_ports_link_status(uint32_t port_mask)
1851 #define CHECK_INTERVAL 100 /* 100ms */
1852 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1854 uint8_t count, all_ports_up, print_flag = 0;
1855 struct rte_eth_link link;
1857 printf("Checking link statuses...\n");
1859 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1861 RTE_ETH_FOREACH_DEV(portid) {
1862 if ((port_mask & (1 << portid)) == 0)
1864 memset(&link, 0, sizeof(link));
1865 rte_eth_link_get_nowait(portid, &link);
1866 /* print link status if flag set */
1867 if (print_flag == 1) {
1868 if (link.link_status)
1870 "Port%d Link Up. speed %u Mbps- %s\n",
1871 portid, link.link_speed,
					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
1875 printf("Port %d Link Down\n", portid);
1878 /* clear all_ports_up flag if any link down */
1879 if (link.link_status == ETH_LINK_DOWN) {
1884 /* after finally printing all link status, get out */
1885 if (print_flag == 1)
1888 if (all_ports_up == 0) {
1890 rte_delay_ms(CHECK_INTERVAL);
1893 /* set the print_flag if all ports up or timeout */
1894 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1904 rmv_event_callback(void *arg)
1906 struct rte_eth_dev *dev;
1907 portid_t port_id = (intptr_t)arg;
1909 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1910 dev = &rte_eth_devices[port_id];
1913 close_port(port_id);
1914 printf("removing device %s\n", dev->device->name);
1915 if (rte_eal_dev_detach(dev->device))
1916 RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
1920 /* This function is used by the interrupt thread */
1922 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1925 static const char * const event_desc[] = {
1926 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1927 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1928 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1929 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1930 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1931 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1932 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1933 [RTE_ETH_EVENT_MAX] = NULL,
1936 RTE_SET_USED(param);
1937 RTE_SET_USED(ret_param);
1939 if (type >= RTE_ETH_EVENT_MAX) {
1940 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1941 port_id, __func__, type);
1943 } else if (event_print_mask & (UINT32_C(1) << type)) {
1944 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1950 case RTE_ETH_EVENT_INTR_RMV:
1951 if (rte_eal_alarm_set(100000,
1952 rmv_event_callback, (void *)(intptr_t)port_id))
1953 fprintf(stderr, "Could not set up deferred device removal\n");
1962 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1966 uint8_t mapping_found = 0;
1968 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1969 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1970 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1971 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1972 tx_queue_stats_mappings[i].queue_id,
1973 tx_queue_stats_mappings[i].stats_counter_id);
1980 port->tx_queue_stats_mapping_enabled = 1;
1985 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1989 uint8_t mapping_found = 0;
1991 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1992 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1993 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1994 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1995 rx_queue_stats_mappings[i].queue_id,
1996 rx_queue_stats_mappings[i].stats_counter_id);
2003 port->rx_queue_stats_mapping_enabled = 1;
2008 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2012 diag = set_tx_queue_stats_mapping_registers(pi, port);
2014 if (diag == -ENOTSUP) {
2015 port->tx_queue_stats_mapping_enabled = 0;
2016 printf("TX queue stats mapping not supported port id=%d\n", pi);
2019 rte_exit(EXIT_FAILURE,
2020 "set_tx_queue_stats_mapping_registers "
2021 "failed for port id=%d diag=%d\n",
2025 diag = set_rx_queue_stats_mapping_registers(pi, port);
2027 if (diag == -ENOTSUP) {
2028 port->rx_queue_stats_mapping_enabled = 0;
2029 printf("RX queue stats mapping not supported port id=%d\n", pi);
2032 rte_exit(EXIT_FAILURE,
2033 "set_rx_queue_stats_mapping_registers "
2034 "failed for port id=%d diag=%d\n",
2040 rxtx_port_config(struct rte_port *port)
2042 port->rx_conf = port->dev_info.default_rxconf;
2043 port->tx_conf = port->dev_info.default_txconf;
2045 /* Check if any RX/TX parameters have been passed */
2046 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2047 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2049 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2050 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2052 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2053 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2055 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2056 port->rx_conf.rx_free_thresh = rx_free_thresh;
2058 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2059 port->rx_conf.rx_drop_en = rx_drop_en;
2061 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2062 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2064 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2065 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2067 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2068 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2070 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2071 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2073 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2074 port->tx_conf.tx_free_thresh = tx_free_thresh;
2076 if (txq_flags != RTE_PMD_PARAM_UNSET)
2077 port->tx_conf.txq_flags = txq_flags;
2081 init_port_config(void)
2084 struct rte_port *port;
2086 RTE_ETH_FOREACH_DEV(pid) {
2088 port->dev_conf.rxmode = rx_mode;
2089 port->dev_conf.fdir_conf = fdir_conf;
2091 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2092 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2094 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2095 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2098 if (port->dcb_flag == 0) {
2099 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2100 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2102 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2105 rxtx_port_config(port);
2107 rte_eth_macaddr_get(pid, &port->eth_addr);
2109 map_port_queue_stats_mapping_registers(pid, port);
2110 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2111 rte_pmd_ixgbe_bypass_init(pid);
2114 if (lsc_interrupt &&
2115 (rte_eth_devices[pid].data->dev_flags &
2116 RTE_ETH_DEV_INTR_LSC))
2117 port->dev_conf.intr_conf.lsc = 1;
2118 if (rmv_interrupt &&
2119 (rte_eth_devices[pid].data->dev_flags &
2120 RTE_ETH_DEV_INTR_RMV))
2121 port->dev_conf.intr_conf.rmv = 1;
2123 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2124 /* Detect softnic port */
2125 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2126 port->softnic_enable = 1;
2127 memset(&port->softport, 0, sizeof(struct softnic_port));
2129 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2130 port->softport.tm_flag = 1;
2136 void set_port_slave_flag(portid_t slave_pid)
2138 struct rte_port *port;
2140 port = &ports[slave_pid];
2141 port->slave_flag = 1;
2144 void clear_port_slave_flag(portid_t slave_pid)
2146 struct rte_port *port;
2148 port = &ports[slave_pid];
2149 port->slave_flag = 0;
2152 uint8_t port_is_bonding_slave(portid_t slave_pid)
2154 struct rte_port *port;
2156 port = &ports[slave_pid];
2157 return port->slave_flag;
2160 const uint16_t vlan_tags[] = {
2161 0, 1, 2, 3, 4, 5, 6, 7,
2162 8, 9, 10, 11, 12, 13, 14, 15,
2163 16, 17, 18, 19, 20, 21, 22, 23,
2164 24, 25, 26, 27, 28, 29, 30, 31
2168 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2169 enum dcb_mode_enable dcb_mode,
2170 enum rte_eth_nb_tcs num_tcs,
2176 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2177 * given above, and the number of traffic classes available for use.
2179 if (dcb_mode == DCB_VT_ENABLED) {
2180 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2181 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2182 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2183 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2185 /* VMDQ+DCB RX and TX configurations */
2186 vmdq_rx_conf->enable_default_pool = 0;
2187 vmdq_rx_conf->default_pool = 0;
2188 vmdq_rx_conf->nb_queue_pools =
2189 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2190 vmdq_tx_conf->nb_queue_pools =
2191 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
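		/*
		 * 4 TCs map to 32 VMDq pools and 8 TCs to 16 pools, so that
		 * pools * TCs covers the full queue set of a typical
		 * VMDQ+DCB capable NIC (e.g. 128 queues).
		 */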
2193 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2194 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2195 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2196 vmdq_rx_conf->pool_map[i].pools =
2197 1 << (i % vmdq_rx_conf->nb_queue_pools);
2199 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2200 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2201 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2204 /* set DCB mode of RX and TX of multiple queues */
2205 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2206 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2208 struct rte_eth_dcb_rx_conf *rx_conf =
2209 ð_conf->rx_adv_conf.dcb_rx_conf;
2210 struct rte_eth_dcb_tx_conf *tx_conf =
2211 ð_conf->tx_adv_conf.dcb_tx_conf;
2213 rx_conf->nb_tcs = num_tcs;
2214 tx_conf->nb_tcs = num_tcs;
2216 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2217 rx_conf->dcb_tc[i] = i % num_tcs;
2218 tx_conf->dcb_tc[i] = i % num_tcs;
2220 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2221 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2222 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2226 eth_conf->dcb_capability_en =
2227 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2229 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2235 init_port_dcb_config(portid_t pid,
2236 enum dcb_mode_enable dcb_mode,
2237 enum rte_eth_nb_tcs num_tcs,
2240 struct rte_eth_conf port_conf;
2241 struct rte_port *rte_port;
2245 rte_port = &ports[pid];
2247 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2248 /* Enter DCB configuration status */
2251 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2252 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2255 port_conf.rxmode.hw_vlan_filter = 1;
2258 * Write the configuration into the device.
2259 * Set the numbers of RX & TX queues to 0, so
2260 * the RX & TX queues will not be setup.
2262 rte_eth_dev_configure(pid, 0, 0, &port_conf);
2264 rte_eth_dev_info_get(pid, &rte_port->dev_info);
	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue ids of the VMDq pools start after the PF queues.
	 */
2269 if (dcb_mode == DCB_VT_ENABLED &&
2270 rte_port->dev_info.vmdq_pool_base > 0) {
2271 printf("VMDQ_DCB multi-queue mode is nonsensical"
2272 " for port %d.", pid);
	/* Assume all ports in testpmd have the same DCB capability
	 * and the same numbers of rxq and txq in DCB mode.
	 */
2279 if (dcb_mode == DCB_VT_ENABLED) {
2280 if (rte_port->dev_info.max_vfs > 0) {
2281 nb_rxq = rte_port->dev_info.nb_rx_queues;
2282 nb_txq = rte_port->dev_info.nb_tx_queues;
2284 nb_rxq = rte_port->dev_info.max_rx_queues;
2285 nb_txq = rte_port->dev_info.max_tx_queues;
2288 /*if vt is disabled, use all pf queues */
2289 if (rte_port->dev_info.vmdq_pool_base == 0) {
2290 nb_rxq = rte_port->dev_info.max_rx_queues;
2291 nb_txq = rte_port->dev_info.max_tx_queues;
2293 nb_rxq = (queueid_t)num_tcs;
2294 nb_txq = (queueid_t)num_tcs;
2298 rx_free_thresh = 64;
2300 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2302 rxtx_port_config(rte_port);
2304 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2305 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2306 rx_vft_set(pid, vlan_tags[i], 1);
2308 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2309 map_port_queue_stats_mapping_registers(pid, rte_port);
2311 rte_port->dcb_flag = 1;
2319 /* Configuration of Ethernet ports. */
2320 ports = rte_zmalloc("testpmd: ports",
2321 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2322 RTE_CACHE_LINE_SIZE);
2323 if (ports == NULL) {
2324 rte_exit(EXIT_FAILURE,
2325 "rte_zmalloc(%d struct rte_port) failed\n",
2341 const char clr[] = { 27, '[', '2', 'J', '\0' };
2342 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
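	/* 27 is the ASCII ESC character: ESC[2J clears the screen and
	 * ESC[1;1H moves the cursor to the top-left corner.
	 */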
2344 /* Clear screen and move to top left */
2345 printf("%s%s", clr, top_left);
2347 printf("\nPort statistics ====================================");
2348 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2349 nic_stats_display(fwd_ports_ids[i]);
2353 signal_handler(int signum)
2355 if (signum == SIGINT || signum == SIGTERM) {
2356 printf("\nSignal %d received, preparing to exit...\n",
2358 #ifdef RTE_LIBRTE_PDUMP
2359 /* uninitialize packet capture framework */
2362 #ifdef RTE_LIBRTE_LATENCY_STATS
2363 rte_latencystats_uninit();
2366 /* Set flag to indicate the force termination. */
2368 /* exit with the expected status */
2369 signal(signum, SIG_DFL);
2370 kill(getpid(), signum);
2375 main(int argc, char** argv)
2380 signal(SIGINT, signal_handler);
2381 signal(SIGTERM, signal_handler);
2383 diag = rte_eal_init(argc, argv);
2385 rte_panic("Cannot init EAL\n");
2387 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2388 RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2392 #ifdef RTE_LIBRTE_PDUMP
2393 /* initialize packet capture framework */
2394 rte_pdump_init(NULL);
2397 nb_ports = (portid_t) rte_eth_dev_count();
2399 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2401 /* allocate port structures, and init them */
2404 set_def_fwd_config();
2406 rte_panic("Empty set of forwarding logical cores - check the "
2407 "core mask supplied in the command parameters\n");
2409 /* Bitrate/latency stats disabled by default */
2410 #ifdef RTE_LIBRTE_BITRATE
2411 bitrate_enabled = 0;
2413 #ifdef RTE_LIBRTE_LATENCY_STATS
2414 latencystats_enabled = 0;
2420 launch_args_parse(argc, argv);
2422 if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");
2426 if (tx_first && lsc_interrupt) {
2427 printf("Warning: lsc_interrupt needs to be off when "
2428 " using tx_first. Disabling.\n");
2432 if (!nb_rxq && !nb_txq)
2433 printf("Warning: Either rx or tx queues should be non-zero\n");
2435 if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent RSS from being fully tested.\n",
2441 if (start_port(RTE_PORT_ALL) != 0)
2442 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2444 /* set all ports to promiscuous mode by default */
2445 RTE_ETH_FOREACH_DEV(port_id)
2446 rte_eth_promiscuous_enable(port_id);
2448 /* Init metrics library */
2449 rte_metrics_init(rte_socket_id());
2451 #ifdef RTE_LIBRTE_LATENCY_STATS
2452 if (latencystats_enabled != 0) {
2453 int ret = rte_latencystats_init(1, NULL);
2455 printf("Warning: latencystats init()"
2456 " returned error %d\n", ret);
2457 printf("Latencystats running on lcore %d\n",
2458 latencystats_lcore_id);
2462 /* Setup bitrate stats */
2463 #ifdef RTE_LIBRTE_BITRATE
2464 if (bitrate_enabled != 0) {
2465 bitrate_data = rte_stats_bitrate_create();
2466 if (bitrate_data == NULL)
2467 rte_exit(EXIT_FAILURE,
2468 "Could not allocate bitrate data.\n");
2469 rte_stats_bitrate_reg(bitrate_data);
2473 #ifdef RTE_LIBRTE_CMDLINE
2474 if (strlen(cmdline_filename) != 0)
2475 cmdline_read_from_file(cmdline_filename);
2477 if (interactive == 1) {
2479 printf("Start automatic packet forwarding\n");
2480 start_packet_forwarding(0);
2492 printf("No commandline core given, start packet forwarding\n");
2493 start_packet_forwarding(tx_first);
2494 if (stats_period != 0) {
2495 uint64_t prev_time = 0, cur_time, diff_time = 0;
2496 uint64_t timer_period;
2498 /* Convert to number of cycles */
2499 timer_period = stats_period * rte_get_timer_hz();
2501 while (f_quit == 0) {
2502 cur_time = rte_get_timer_cycles();
2503 diff_time += cur_time - prev_time;
2505 if (diff_time >= timer_period) {
2507 /* Reset the timer */
2510 /* Sleep to avoid unnecessary checks */
2511 prev_time = cur_time;
2516 printf("Press enter to exit\n");
2517 rc = read(0, &c, 1);