/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif
uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not set.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (which may not be physically contiguous) for
 * mbufs.
 */
uint8_t mp_anon = 0;
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Equals nb_ports * RTE_MAX(nb_rxq, nb_txq). */
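/*
 * For example, with 2 probed ports, nb_rxq = 4 and nb_txq = 2,
 * init_fwd_streams() below allocates 2 * RTE_MAX(4, 2) = 8 forwarding
 * streams.
 */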
/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default). */

/*
 * When running inside a container, the process started with the
 * 'stats-period' option cannot be terminated from outside. Set this flag
 * on SIGINT/SIGTERM so that the stats-period loop exits.
 */
uint8_t f_quit;
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets. */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */
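/*
 * For example, a "txpkts" configuration of 64,64 (illustrative values)
 * yields tx_pkt_nb_segs = 2 and tx_pkt_seg_lengths = {64, 64}: each TXONLY
 * packet is then built from two chained mbuf segments of 64 bytes, i.e. a
 * 128-byte packet.
 */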
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means not in DCB mode. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing state. */
uint8_t dcb_test = 0;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
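/*
 * RTE_PMD_PARAM_UNSET means "keep the PMD default": rxtx_port_config()
 * below only overrides a threshold that was explicitly configured, e.g.
 * through command-line options (such as --rxpt/--rxht/--rxwt for the RX
 * thresholds; option names assumed here for illustration).
 */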
/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default. */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

/*
 * Display or mask Ethernet device events.
 * Defaults to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
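/*
 * For example, to also display VF mailbox events, OR in
 * (UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX); eth_event_callback() below only
 * prints events whose bit is set in this mask.
 */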
/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS
/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;
#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket id has not been discovered yet.
 * Returns a positive value if the socket id is new, zero if it is already
 * known.
 */
static int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc = 0;
	unsigned int sock_num;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
}
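/*
 * Set the default Ethernet addresses of the peer target ports to locally
 * administered addresses: ETHER_LOCAL_ADMIN_ADDR (0x02) in the first byte
 * and the port index in the last one, i.e. 02:00:00:00:00:<port>.
 */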
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	unsigned int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
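/*
 * For example, init_config() below calls
 * mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0) to create a single
 * pool on socket 0 when NUMA support is disabled.
 */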
/*
 * Check the given socket id against the discovered sockets in NUMA mode.
 * Returns 0 if the socket id is valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types = 0;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
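	/*
	 * For example (illustrative values): with RTE_TEST_RX_DESC_MAX and
	 * RTE_TEST_TX_DESC_MAX at 2048 each, 4 lcores and a 250-entry mempool
	 * cache, each pool holds (2048 + 4 * 250 + 2048 + MAX_PKT_BURST) *
	 * RTE_MAX_ETHPORTS mbufs.
	 */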
	if (numa_support) {
		unsigned int i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
			fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
				 "rte_gro_ctx_create() failed\n");
		}
	}
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
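/*
 * Example of the resulting display (hypothetical numbers):
 *   RX-bursts : 1045 [90% of 32 pkts + 9% of 16 pkts + 1% of others]
 */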
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;
	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       " RX-total: %-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       " TX-total: %-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64
		       " RX-total:%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64
		       " TX-total:%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t  port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the below do-while
				 * loop if rte_eth_rx_burst() always returns
				 * nonzero packets. So a timer is added to exit
				 * this loop after the 1 sec timer expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
		    bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
				     idx_port < cnt_ports;
				     idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
							       idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
		    latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq are 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64" RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64" TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* All ports are started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
			       port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       " No mempool allocation"
						       " on the socket %d\n",
						       rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       " No mempool allocation"
						       " on the socket %d\n",
						       port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}

				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		for (event_type = RTE_ETH_EVENT_UNKNOWN;
		     event_type < RTE_ETH_EVENT_MAX;
		     event_type++) {
			diag = rte_eth_dev_callback_register(pi,
							     event_type,
							     eth_event_callback,
							     NULL);
			if (diag) {
				printf("Failed to setup event callback for event %d\n",
				       event_type);
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
				       "stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
		       mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}
}
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
	}
}
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
}

void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name)) {
		RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
		return;
	}

	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
	       name, nb_ports);
}
void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port %d Link Up. speed %u Mbps - %s\n",
					portid, link.link_speed,
					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0)
			rte_delay_ms(CHECK_INTERVAL);

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
static void
rmv_event_callback(void *arg)
{
	struct rte_eth_dev *dev;
	uint8_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	stop_port(port_id);
	close_port(port_id);
	printf("removing device %s\n", dev->device->name);
	if (rte_eal_dev_detach(dev->device))
		RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
			dev->device->name);
}

/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		   void *ret_param)
{
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu8 ": %s event\n", port_id,
		       event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_tx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_rx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}
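/*
 * VLAN ids mapped to VMDq pools in the DCB+VT configuration built by
 * get_eth_dcb_conf() below.
 */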
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
		       " for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}
}
static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
		       signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
		RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
			 "interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
		       "using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
			       " returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
		       latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				 "Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}