1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
16 #include <sys/queue.h>
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
41 #include <rte_mbuf_pool_ops.h>
42 #include <rte_interrupts.h>
44 #include <rte_ether.h>
45 #include <rte_ethdev.h>
47 #include <rte_string_fns.h>
48 #ifdef RTE_LIBRTE_IXGBE_PMD
49 #include <rte_pmd_ixgbe.h>
51 #ifdef RTE_LIBRTE_PDUMP
52 #include <rte_pdump.h>
55 #include <rte_metrics.h>
56 #ifdef RTE_LIBRTE_BITRATE
57 #include <rte_bitrate.h>
59 #ifdef RTE_LIBRTE_LATENCY_STATS
60 #include <rte_latencystats.h>
65 uint16_t verbose_level = 0; /**< Silent by default. */
66 int testpmd_logtype; /**< Log type for testpmd logs */
68 /* use master core for command line ? */
69 uint8_t interactive = 0;
70 uint8_t auto_start = 0;
72 char cmdline_filename[PATH_MAX] = {0};
75 * NUMA support configuration.
76 * When set, the NUMA support attempts to dispatch the allocation of the
77 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
78 * probed ports among the CPU sockets 0 and 1.
79 * Otherwise, all memory is allocated from CPU socket 0.
81 uint8_t numa_support = 1; /**< numa enabled by default */
84 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
87 uint8_t socket_num = UMA_NO_CONFIG;
90 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
95 * Record the Ethernet address of peer target ports to which packets are
97 * Must be instantiated with the ethernet addresses of peer traffic generator
100 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
101 portid_t nb_peer_eth_addrs = 0;
104 * Probed Target Environment.
106 struct rte_port *ports; /**< For all probed ethernet ports. */
107 portid_t nb_ports; /**< Number of probed ethernet ports. */
108 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
109 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
112 * Test Forwarding Configuration.
113 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
114 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
116 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
117 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
118 portid_t nb_cfg_ports; /**< Number of configured ports. */
119 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
121 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
122 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
124 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
125 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
128 * Forwarding engines.
130 struct fwd_engine * fwd_engines[] = {
139 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
141 &softnic_tm_bypass_engine,
143 #ifdef RTE_LIBRTE_IEEE1588
144 &ieee1588_fwd_engine,
149 struct fwd_config cur_fwd_config;
150 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
151 uint32_t retry_enabled;
152 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
153 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
155 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
156 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
157 * specified on command-line. */
158 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
161 * In a container, it cannot terminate the process which is running with the 'stats-period'
162 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
167 * Configuration of packet segments used by the "txonly" processing engine.
169 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
170 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
171 TXONLY_DEF_PACKET_LEN,
173 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
175 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
176 /**< Split policy for packets to TX. */
178 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
179 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
181 /* current configuration is in DCB or not,0 means it is not in DCB mode */
182 uint8_t dcb_config = 0;
184 /* Whether the dcb is in testing status */
185 uint8_t dcb_test = 0;
188 * Configurable number of RX/TX queues.
190 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
191 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
194 * Configurable number of RX/TX ring descriptors.
196 #define RTE_TEST_RX_DESC_DEFAULT 128
197 #define RTE_TEST_TX_DESC_DEFAULT 512
198 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
199 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
201 #define RTE_PMD_PARAM_UNSET -1
203 * Configurable values of RX and TX ring threshold registers.
206 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
207 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
210 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
215 * Configurable value of RX free threshold.
217 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
220 * Configurable value of RX drop enable.
222 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
225 * Configurable value of TX free threshold.
227 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
230 * Configurable value of TX RS bit threshold.
232 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
235 * Receive Side Scaling (RSS) configuration.
237 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
240 * Port topology configuration
242 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
245 * Avoids to flush all the RX streams before starts forwarding.
247 uint8_t no_flush_rx = 0; /* flush by default */
250 * Flow API isolated mode.
252 uint8_t flow_isolate_all;
255 * Avoids to check link status when starting/stopping a port.
257 uint8_t no_link_check = 0; /* check by default */
260 * Enable link status change notification
262 uint8_t lsc_interrupt = 1; /* enabled by default */
265 * Enable device removal notification.
267 uint8_t rmv_interrupt = 1; /* enabled by default */
270 * Display or mask ether events
271 * Default to all events except VF_MBOX
273 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
274 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
275 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
276 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
277 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
278 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
281 * NIC bypass mode configuration options.
284 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
285 /* The NIC bypass watchdog timeout. */
286 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
290 #ifdef RTE_LIBRTE_LATENCY_STATS
293 * Set when latency stats is enabled in the commandline
295 uint8_t latencystats_enabled;
298 * Lcore ID to serve latency statistics.
300 lcoreid_t latencystats_lcore_id = -1;
305 * Ethernet device configuration.
307 struct rte_eth_rxmode rx_mode = {
308 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
309 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
310 .ignore_offload_bitfield = 1,
313 struct rte_eth_txmode tx_mode = {
314 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
317 struct rte_fdir_conf fdir_conf = {
318 .mode = RTE_FDIR_MODE_NONE,
319 .pballoc = RTE_FDIR_PBALLOC_64K,
320 .status = RTE_FDIR_REPORT_STATUS,
322 .vlan_tci_mask = 0x0,
324 .src_ip = 0xFFFFFFFF,
325 .dst_ip = 0xFFFFFFFF,
328 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
329 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
331 .src_port_mask = 0xFFFF,
332 .dst_port_mask = 0xFFFF,
333 .mac_addr_byte_mask = 0xFF,
334 .tunnel_type_mask = 1,
335 .tunnel_id_mask = 0xFFFFFFFF,
340 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
342 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
343 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
345 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
346 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
348 uint16_t nb_tx_queue_stats_mappings = 0;
349 uint16_t nb_rx_queue_stats_mappings = 0;
352 * Display zero values by default for xstats
354 uint8_t xstats_hide_zero;
356 unsigned int num_sockets = 0;
357 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
359 #ifdef RTE_LIBRTE_BITRATE
360 /* Bitrate statistics */
361 struct rte_stats_bitrates *bitrate_data;
362 lcoreid_t bitrate_lcore_id;
363 uint8_t bitrate_enabled;
366 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
367 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
369 /* Forward function declarations */
370 static void map_port_queue_stats_mapping_registers(portid_t pi,
371 struct rte_port *port);
372 static void check_all_ports_link_status(uint32_t port_mask);
373 static int eth_event_callback(portid_t port_id,
374 enum rte_eth_event_type type,
375 void *param, void *ret_param);
378 * Check if all the ports are started.
379 * If yes, return positive value. If not, return zero.
381 static int all_ports_started(void);
383 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
384 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
387 * Helper function to check if socket is already discovered.
388 * If yes, return positive value. If not, return zero.
/* NOTE(review): this listing elides lines (see the gapped numbering); the
 * return statements of this lookup are not visible here. The loop scans the
 * already-discovered sockets for a match — confirm the exact return
 * convention against the full source. */
391 new_socket_id(unsigned int socket_id)
395 for (i = 0; i < num_sockets; i++) {
396 if (socket_ids[i] == socket_id)
403 * Setup default configuration.
/*
 * Build the default forwarding-lcore list: every enabled lcore except the
 * master is appended to fwd_lcores_cpuids[], and each newly seen CPU socket
 * is registered in socket_ids[]/num_sockets.
 * NOTE(review): declarations of 'i' and 'nb_lc' are in lines elided from
 * this extract.
 */
406 set_default_fwd_lcores_config(void)
410 unsigned int sock_num;
413 for (i = 0; i < RTE_MAX_LCORE; i++) {
414 sock_num = rte_lcore_to_socket_id(i);
/* Register sockets as they are discovered; abort when more NUMA nodes
 * show up than socket_ids[] can hold. */
415 if (new_socket_id(sock_num)) {
416 if (num_sockets >= RTE_MAX_NUMA_NODES) {
417 rte_exit(EXIT_FAILURE,
418 "Total sockets greater than %u\n",
421 socket_ids[num_sockets++] = sock_num;
/* Skip disabled lcores and the master lcore (kept free for the command
 * line — see the 'interactive' knob near the top of the file). */
423 if (!rte_lcore_is_enabled(i))
425 if (i == rte_get_master_lcore())
427 fwd_lcores_cpuids[nb_lc++] = i;
/* By default every collected lcore is both configured and forwarding. */
429 nb_lcores = (lcoreid_t) nb_lc;
430 nb_cfg_lcores = nb_lcores;
/*
 * Give every possible peer port a default Ethernet address: the locally
 * administered OUI byte first, with the port index in the last byte.
 */
435 set_def_peer_eth_addrs(void)
439 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
440 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
441 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default forwarding-port configuration: forward on every probed ethdev
 * port, in probe order; all probed ports are both configured and
 * forwarding.
 */
446 set_default_fwd_ports_config(void)
451 RTE_ETH_FOREACH_DEV(pt_id)
452 fwd_ports_ids[i++] = pt_id;
454 nb_cfg_ports = nb_ports;
455 nb_fwd_ports = nb_ports;
/* Reset lcores, peer MAC addresses and ports to their default config. */
459 set_def_fwd_config(void)
461 set_default_fwd_lcores_config();
462 set_def_peer_eth_addrs();
463 set_default_fwd_ports_config();
467 * Configuration initialisation done once at init time.
/*
 * Create (and, in verbose mode, dump) the mbuf pool used on @socket_id,
 * holding @nb_mbuf buffers of @mbuf_seg_size data bytes each. Terminates
 * the application on allocation failure.
 * NOTE(review): lines are elided in this extract — the branch that selects
 * between the anonymous-memory path (rte_mempool_create_empty +
 * rte_mempool_populate_anon) and the plain rte_pktmbuf_pool_create() path
 * is not fully visible; confirm against the full source.
 */
470 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
471 unsigned int socket_id)
473 char pool_name[RTE_MEMPOOL_NAMESIZE];
474 struct rte_mempool *rte_mp = NULL;
/* Element size = mbuf header plus the data room requested. */
477 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
478 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
481 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
482 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
/* Anonymous-memory path: build an empty pool, back it with anonymous
 * mappings, and free the pool again if population fails (returns 0). */
485 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
486 mb_size, (unsigned) mb_mempool_cache,
487 sizeof(struct rte_pktmbuf_pool_private),
492 if (rte_mempool_populate_anon(rte_mp) == 0) {
493 rte_mempool_free(rte_mp);
497 rte_pktmbuf_pool_init(rte_mp, NULL);
498 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
500 /* wrapper to rte_mempool_create() */
501 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
502 rte_mbuf_best_mempool_ops());
503 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
504 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
/* Pool creation failure is fatal; otherwise optionally dump the pool. */
508 if (rte_mp == NULL) {
509 rte_exit(EXIT_FAILURE,
510 "Creation of mbuf pool for socket %u failed: %s\n",
511 socket_id, rte_strerror(rte_errno));
512 } else if (verbose_level > 0) {
513 rte_mempool_dump(stdout, rte_mp);
518 * Check given socket id is valid or not with NUMA mode,
519 * if valid, return 0, else return -1
/* NOTE(review): the tail of the warning text and the return statements are
 * elided in this extract. 'warning_once' makes the NUMA configuration hint
 * print at most once per run. */
522 check_socket_id(const unsigned int socket_id)
524 static int warning_once = 0;
/* An unknown socket id means the port sits on a socket with no enabled
 * lcore — warn (once) that NUMA placement must be configured manually. */
526 if (new_socket_id(socket_id)) {
527 if (!warning_once && numa_support)
528 printf("Warning: NUMA should be configured manually by"
529 " using --port-numa-config and"
530 " --ring-numa-config parameters along with"
539 * Get the allowed maximum number of RX queues.
540 * *pid return the port id which has minimal value of
541 * max_rx_queues in all ports.
544 get_allowed_max_nb_rxq(portid_t *pid)
546 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
548 struct rte_eth_dev_info dev_info;
/* Track the minimum max_rx_queues over all ports; the *pid assignment
 * recording the limiting port is in a line elided from this extract. */
550 RTE_ETH_FOREACH_DEV(pi) {
551 rte_eth_dev_info_get(pi, &dev_info);
552 if (dev_info.max_rx_queues < allowed_max_rxq) {
553 allowed_max_rxq = dev_info.max_rx_queues;
557 return allowed_max_rxq;
561 * Check input rxq is valid or not.
562 * If input rxq is not greater than any of maximum number
563 * of RX queues of all ports, it is valid.
564 * if valid, return 0, else return -1
567 check_nb_rxq(queueid_t rxq)
569 queueid_t allowed_max_rxq;
/* Reject the request when it exceeds the most constrained port; the
 * error/success returns are in lines elided from this extract. */
572 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
573 if (rxq > allowed_max_rxq) {
574 printf("Fail: input rxq (%u) can't be greater "
575 "than max_rx_queues (%u) of port %u\n",
585 * Get the allowed maximum number of TX queues.
586 * *pid return the port id which has minimal value of
587 * max_tx_queues in all ports.
590 get_allowed_max_nb_txq(portid_t *pid)
592 queueid_t allowed_max_txq = MAX_QUEUE_ID;
594 struct rte_eth_dev_info dev_info;
/* TX-side twin of get_allowed_max_nb_rxq(): minimum max_tx_queues across
 * all ports (the *pid assignment line is elided from this extract). */
596 RTE_ETH_FOREACH_DEV(pi) {
597 rte_eth_dev_info_get(pi, &dev_info);
598 if (dev_info.max_tx_queues < allowed_max_txq) {
599 allowed_max_txq = dev_info.max_tx_queues;
603 return allowed_max_txq;
607 * Check input txq is valid or not.
608 * If input txq is not greater than any of maximum number
609 * of TX queues of all ports, it is valid.
610 * if valid, return 0, else return -1
613 check_nb_txq(queueid_t txq)
615 queueid_t allowed_max_txq;
/* Reject the request when it exceeds the most constrained port; the
 * error/success returns are in lines elided from this extract. */
618 allowed_max_txq = get_allowed_max_nb_txq(&pid);
619 if (txq > allowed_max_txq) {
620 printf("Fail: input txq (%u) can't be greater "
621 "than max_tx_queues (%u) of port %u\n",
/* NOTE(review): the function header is elided in this extract; judging by
 * the body this is testpmd's one-time init_config(): it allocates per-lcore
 * contexts, applies default port configuration, creates the mbuf pools,
 * builds forwarding streams and creates a GRO context per lcore. Confirm
 * the signature against the full source. */
634 struct rte_port *port;
635 struct rte_mempool *mbp;
636 unsigned int nb_mbuf_per_pool;
638 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
639 struct rte_gro_param gro_param;
642 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
/* Start all per-port/per-ring NUMA placement tables unconfigured. */
645 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
646 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
647 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
650 /* Configuration of logical cores. */
651 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
652 sizeof(struct fwd_lcore *) * nb_lcores,
653 RTE_CACHE_LINE_SIZE);
654 if (fwd_lcores == NULL) {
655 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
656 "failed\n", nb_lcores);
658 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
659 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
660 sizeof(struct fwd_lcore),
661 RTE_CACHE_LINE_SIZE);
662 if (fwd_lcores[lc_id] == NULL) {
663 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
666 fwd_lcores[lc_id]->cpuid_idx = lc_id;
669 RTE_ETH_FOREACH_DEV(pid) {
671 /* Apply default TxRx configuration for all ports */
672 port->dev_conf.txmode = tx_mode;
673 port->dev_conf.rxmode = rx_mode;
674 rte_eth_dev_info_get(pid, &port->dev_info);
/* Strip default offloads the device does not actually support. */
675 if (!(port->dev_info.tx_offload_capa &
676 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
677 port->dev_conf.txmode.offloads &=
678 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
679 if (!(port->dev_info.rx_offload_capa &
680 DEV_RX_OFFLOAD_CRC_STRIP))
681 port->dev_conf.rxmode.offloads &=
682 ~DEV_RX_OFFLOAD_CRC_STRIP;
/* Count ports per socket: explicit --port-numa-config wins, otherwise
 * use the socket reported by the device. */
684 if (port_numa[pid] != NUMA_NO_CONFIG)
685 port_per_socket[port_numa[pid]]++;
687 uint32_t socket_id = rte_eth_dev_socket_id(pid);
689 /* if socket_id is invalid, set to 0 */
690 if (check_socket_id(socket_id) < 0)
692 port_per_socket[socket_id]++;
696 /* set flag to initialize port/queue */
697 port->need_reconfig = 1;
698 port->need_reconfig_queues = 1;
702 * Create pools of mbuf.
703 * If NUMA support is disabled, create a single pool of mbuf in
704 * socket 0 memory by default.
705 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
707 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
708 * nb_txd can be configured at run time.
710 if (param_total_num_mbufs)
711 nb_mbuf_per_pool = param_total_num_mbufs;
713 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
714 (nb_lcores * mb_mempool_cache) +
715 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
716 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
/* NUMA on: one pool per discovered socket; NUMA off: a single pool on
 * socket 0, or on --socket-num when given. */
722 for (i = 0; i < num_sockets; i++)
723 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
726 if (socket_num == UMA_NO_CONFIG)
727 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
729 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
735 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
736 DEV_TX_OFFLOAD_GRE_TNL_TSO;
738 * Records which Mbuf pool to use by each logical core, if needed.
740 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
741 mbp = mbuf_pool_find(
742 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
/* Fall back to the socket-0 pool when the lcore's socket has none
 * (the NULL check sits in a line elided from this extract). */
745 mbp = mbuf_pool_find(0);
746 fwd_lcores[lc_id]->mbp = mbp;
747 /* initialize GSO context */
748 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
749 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
750 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
751 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
753 fwd_lcores[lc_id]->gso_ctx.flag = 0;
756 /* Configuration of packet forwarding streams. */
757 if (init_fwd_streams() < 0)
758 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
762 /* create a gro context for each lcore */
763 gro_param.gro_types = RTE_GRO_TCP_IPV4;
764 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
765 gro_param.max_item_per_flow = MAX_PKT_BURST;
766 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
767 gro_param.socket_id = rte_lcore_to_socket_id(
768 fwd_lcores_cpuids[lc_id]);
769 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
770 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
771 rte_exit(EXIT_FAILURE,
772 "rte_gro_ctx_create() failed\n");
/*
 * Re-read device info for @new_port_id (e.g. after a device attach) and
 * flag the port so its configuration and queues are set up again on the
 * next start; the port's memory socket is forced to @socket_id.
 */
779 reconfig(portid_t new_port_id, unsigned socket_id)
781 struct rte_port *port;
783 /* Reconfiguration of Ethernet ports. */
784 port = &ports[new_port_id];
785 rte_eth_dev_info_get(new_port_id, &port->dev_info);
787 /* set flag to initialize port/queue */
788 port->need_reconfig = 1;
789 port->need_reconfig_queues = 1;
790 port->socket_id = socket_id;
/*
 * (Re)allocate the forwarding streams: validate nb_rxq/nb_txq against each
 * port's limits, pick each port's NUMA socket, then rebuild fwd_streams[]
 * sized nb_ports * max(nb_rxq, nb_txq).
 * NOTE(review): the error returns after the printf paths and the early
 * "continue"-style lines are elided from this extract — confirm against the
 * full source.
 */
797 init_fwd_streams(void)
800 struct rte_port *port;
801 streamid_t sm_id, nb_fwd_streams_new;
804 /* set socket id according to numa or not */
805 RTE_ETH_FOREACH_DEV(pid) {
807 if (nb_rxq > port->dev_info.max_rx_queues) {
808 printf("Fail: nb_rxq(%d) is greater than "
809 "max_rx_queues(%d)\n", nb_rxq,
810 port->dev_info.max_rx_queues);
813 if (nb_txq > port->dev_info.max_tx_queues) {
814 printf("Fail: nb_txq(%d) is greater than "
815 "max_tx_queues(%d)\n", nb_txq,
816 port->dev_info.max_tx_queues);
/* NUMA mode: honour --port-numa-config first, else ask the device. */
820 if (port_numa[pid] != NUMA_NO_CONFIG)
821 port->socket_id = port_numa[pid];
823 port->socket_id = rte_eth_dev_socket_id(pid);
825 /* if socket_id is invalid, set to 0 */
826 if (check_socket_id(port->socket_id) < 0)
/* UMA mode: all ports share one socket (--socket-num when given). */
831 if (socket_num == UMA_NO_CONFIG)
834 port->socket_id = socket_num;
838 q = RTE_MAX(nb_rxq, nb_txq);
840 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
/* Nothing to do when the total stream count did not change. */
843 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
844 if (nb_fwd_streams_new == nb_fwd_streams)
/* Free the previous stream array before re-allocating. */
847 if (fwd_streams != NULL) {
848 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
849 if (fwd_streams[sm_id] == NULL)
851 rte_free(fwd_streams[sm_id]);
852 fwd_streams[sm_id] = NULL;
854 rte_free(fwd_streams);
859 nb_fwd_streams = nb_fwd_streams_new;
860 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
861 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
862 if (fwd_streams == NULL)
863 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
864 "failed\n", nb_fwd_streams);
866 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
867 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
868 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
869 if (fwd_streams[sm_id] == NULL)
870 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
877 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Pretty-print the burst-size spread recorded for one direction (@rx_tx is
 * used as a label, e.g. "RX"/"TX"): total bursts plus the dominant burst
 * sizes as percentages. NOTE(review): lines are elided in this extract;
 * only the top-1 update of the top-2 tracking is visible below.
 */
879 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
881 unsigned int total_burst;
882 unsigned int nb_burst;
883 unsigned int burst_stats[3];
884 uint16_t pktnb_stats[3];
886 int burst_percent[3];
889 * First compute the total number of packet bursts and the
890 * two highest numbers of bursts of the same number of packets.
893 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
894 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
895 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
896 nb_burst = pbs->pkt_burst_spread[nb_pkt];
899 total_burst += nb_burst;
/* New leader: demote the previous best to second place. */
900 if (nb_burst > burst_stats[0]) {
901 burst_stats[1] = burst_stats[0];
902 pktnb_stats[1] = pktnb_stats[0];
903 burst_stats[0] = nb_burst;
904 pktnb_stats[0] = nb_pkt;
/* Nothing recorded: avoid dividing by zero below. */
907 if (total_burst == 0)
909 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
910 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
911 burst_percent[0], (int) pktnb_stats[0]);
912 if (burst_stats[0] == total_burst) {
916 if (burst_stats[0] + burst_stats[1] == total_burst) {
917 printf(" + %d%% of %d pkts]\n",
918 100 - burst_percent[0], pktnb_stats[1]);
921 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
922 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
923 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
924 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
927 printf(" + %d%% of %d pkts + %d%% of others]\n",
928 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
930 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print per-port forwarding statistics: packet/drop/error counters (layout
 * differs depending on whether queue-stats mapping is enabled), optional
 * checksum-error counters in csum mode, and per-stats-register queue
 * counters when mapping is enabled.
 */
933 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
935 struct rte_port *port;
938 static const char *fwd_stats_border = "----------------------";
940 port = &ports[port_id];
941 printf("\n %s Forward statistics for port %-2d %s\n",
942 fwd_stats_border, port_id, fwd_stats_border);
/* Layout 1: no queue-stats mapping on either direction. */
944 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
945 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
947 stats->ipackets, stats->imissed,
948 (uint64_t) (stats->ipackets + stats->imissed));
/* Checksum engine adds its bad-checksum counters. */
950 if (cur_fwd_eng == &csum_fwd_engine)
951 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
952 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
953 if ((stats->ierrors + stats->rx_nombuf) > 0) {
954 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
955 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
958 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
960 stats->opackets, port->tx_dropped,
961 (uint64_t) (stats->opackets + port->tx_dropped));
/* Layout 2: same counters, alternate column formatting. */
964 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
966 stats->ipackets, stats->imissed,
967 (uint64_t) (stats->ipackets + stats->imissed));
969 if (cur_fwd_eng == &csum_fwd_engine)
970 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
971 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
972 if ((stats->ierrors + stats->rx_nombuf) > 0) {
973 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
974 printf(" RX-nombufs: %14"PRIu64"\n",
978 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
980 stats->opackets, port->tx_dropped,
981 (uint64_t) (stats->opackets + port->tx_dropped));
984 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
986 pkt_burst_stats_display("RX",
987 &port->rx_stream->rx_burst_stats);
989 pkt_burst_stats_display("TX",
990 &port->tx_stream->tx_burst_stats);
/* Per-stats-register queue counters, when mapping is in use. */
993 if (port->rx_queue_stats_mapping_enabled) {
995 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
996 printf(" Stats reg %2d RX-packets:%14"PRIu64
997 " RX-errors:%14"PRIu64
998 " RX-bytes:%14"PRIu64"\n",
999 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1003 if (port->tx_queue_stats_mapping_enabled) {
1004 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1005 printf(" Stats reg %2d TX-packets:%14"PRIu64
1006 " TX-bytes:%14"PRIu64"\n",
1007 i, stats->q_opackets[i], stats->q_obytes[i]);
1011 printf(" %s--------------------------------%s\n",
1012 fwd_stats_border, fwd_stats_border);
/*
 * Print statistics for one forwarding stream (RX port/queue -> TX
 * port/queue). Streams that saw no traffic and dropped nothing are
 * skipped entirely.
 */
1016 fwd_stream_stats_display(streamid_t stream_id)
1018 struct fwd_stream *fs;
1019 static const char *fwd_top_stats_border = "-------";
1021 fs = fwd_streams[stream_id];
/* Quiet stream: nothing received, sent or dropped — print nothing. */
1022 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1023 (fs->fwd_dropped == 0))
1025 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1026 "TX Port=%2d/Queue=%2d %s\n",
1027 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1028 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1029 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1030 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1032 /* if checksum mode */
1033 if (cur_fwd_eng == &csum_fwd_engine) {
1034 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
1035 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1038 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1039 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1040 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain every RX queue of every forwarding port, freeing all received
 * mbufs, so forwarding starts from clean queues. Two passes are made (with
 * a 10 ms pause between them), and each queue's drain loop is bounded by a
 * one-second TSC timer so a never-idle queue cannot hang testpmd.
 */
1045 flush_fwd_rx_queues(void)
1047 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1054 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1055 uint64_t timer_period;
1057 /* convert to number of cycles */
1058 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1060 for (j = 0; j < 2; j++) {
1061 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1062 for (rxq = 0; rxq < nb_rxq; rxq++) {
1063 port_id = fwd_ports_ids[rxp];
1065 * testpmd can stuck in the below do while loop
1066 * if rte_eth_rx_burst() always returns nonzero
1067 * packets. So timer is added to exit this loop
1068 * after 1sec timer expiry.
1070 prev_tsc = rte_rdtsc();
/* Drain and free until the queue goes idle or the timer expires. */
1072 nb_rx = rte_eth_rx_burst(port_id, rxq,
1073 pkts_burst, MAX_PKT_BURST);
1074 for (i = 0; i < nb_rx; i++)
1075 rte_pktmbuf_free(pkts_burst[i]);
1077 cur_tsc = rte_rdtsc();
1078 diff_tsc = cur_tsc - prev_tsc;
1079 timer_tsc += diff_tsc;
1080 } while ((nb_rx > 0) &&
1081 (timer_tsc < timer_period));
1085 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main per-lcore forwarding loop: repeatedly run @pkt_fwd on every stream
 * assigned to @fc until fc->stopped is set. Optionally performs the
 * periodic (once-per-second) bitrate calculation and the latency-stats
 * update, each only on the lcore designated for that task.
 */
1090 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1092 struct fwd_stream **fsm;
1095 #ifdef RTE_LIBRTE_BITRATE
1096 uint64_t tics_per_1sec;
1097 uint64_t tics_datum;
1098 uint64_t tics_current;
1099 uint8_t idx_port, cnt_ports;
1101 cnt_ports = rte_eth_dev_count();
1102 tics_datum = rte_rdtsc();
1103 tics_per_1sec = rte_get_timer_hz();
/* Streams handled by this lcore: a contiguous slice of fwd_streams[]. */
1105 fsm = &fwd_streams[fc->stream_idx];
1106 nb_fs = fc->stream_nb;
1108 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1109 (*pkt_fwd)(fsm[sm_id]);
1110 #ifdef RTE_LIBRTE_BITRATE
/* Bitrate sampling runs only on the dedicated bitrate lcore. */
1111 if (bitrate_enabled != 0 &&
1112 bitrate_lcore_id == rte_lcore_id()) {
1113 tics_current = rte_rdtsc();
1114 if (tics_current - tics_datum >= tics_per_1sec) {
1115 /* Periodic bitrate calculation */
1117 idx_port < cnt_ports;
1119 rte_stats_bitrate_calc(bitrate_data,
1121 tics_datum = tics_current;
1125 #ifdef RTE_LIBRTE_LATENCY_STATS
1126 if (latencystats_enabled != 0 &&
1127 latencystats_lcore_id == rte_lcore_id())
1128 rte_latencystats_update();
1131 } while (! fc->stopped);
/*
 * lcore entry point: run the currently configured forwarding engine's
 * packet_fwd callback on this lcore's streams until told to stop.
 */
1135 start_pkt_forward_on_core(void *fwd_arg)
1137 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1138 cur_fwd_config.fwd_eng->packet_fwd);
1143 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1144 * Used to start communication flows in network loopback test configurations.
/* A local copy of the lcore context with 'stopped' pre-set makes the
 * forwarding loop execute exactly one iteration without touching the real
 * per-lcore state. */
1147 run_one_txonly_burst_on_core(void *fwd_arg)
1149 struct fwd_lcore *fwd_lc;
1150 struct fwd_lcore tmp_lcore;
1152 fwd_lc = (struct fwd_lcore *) fwd_arg;
1153 tmp_lcore = *fwd_lc;
1154 tmp_lcore.stopped = 1;
1155 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1160 * Launch packet forwarding:
1161 * - Setup per-port forwarding context.
1162 * - launch logical cores with their forwarding configuration.
1165 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1167 port_fwd_begin_t port_fwd_begin;
/* Give the engine a chance to set up each forwarding port first. */
1172 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1173 if (port_fwd_begin != NULL) {
1174 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1175 (*port_fwd_begin)(fwd_ports_ids[i]);
1177 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1178 lc_id = fwd_lcores_cpuids[i];
/* In interactive mode the current (master) lcore is never launched
 * remotely — it must stay available for the command line. */
1179 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1180 fwd_lcores[i]->stopped = 0;
1181 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1182 fwd_lcores[i], lc_id);
1184 printf("launch lcore %u failed - diag=%d\n",
1191 * Launch packet forwarding configuration.
/*
 * Validate the current engine/queue configuration, reset all per-port and
 * per-stream statistics, optionally send @with_tx_first warm-up TXONLY
 * bursts, then launch the forwarding lcores.
 * NOTE(review): several early-return lines after the printf diagnostics are
 * elided from this extract.
 */
1194 start_packet_forwarding(int with_tx_first)
1196 port_fwd_begin_t port_fwd_begin;
1197 port_fwd_end_t port_fwd_end;
1198 struct rte_port *port;
/* Sanity: the chosen engine must have the queues it needs. */
1203 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1204 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1206 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1207 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1209 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1210 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1211 (!nb_rxq || !nb_txq))
1212 rte_exit(EXIT_FAILURE,
1213 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1214 cur_fwd_eng->fwd_mode_name);
1216 if (all_ports_started() == 0) {
1217 printf("Not all ports were started\n");
1220 if (test_done == 0) {
1221 printf("Packet forwarding already started\n");
1225 if (init_fwd_streams() < 0) {
1226 printf("Fail from init_fwd_streams()\n");
/* In DCB mode every forwarding port must be DCB-configured and more
 * than one forwarding core is required. */
1231 for (i = 0; i < nb_fwd_ports; i++) {
1232 pt_id = fwd_ports_ids[i];
1233 port = &ports[pt_id];
1234 if (!port->dcb_flag) {
1235 printf("In DCB mode, all forwarding ports must "
1236 "be configured in this mode.\n");
1240 if (nb_fwd_lcores == 1) {
1241 printf("In DCB mode,the nb forwarding cores "
1242 "should be larger than 1.\n");
1249 flush_fwd_rx_queues();
1252 pkt_fwd_config_display(&cur_fwd_config);
1253 rxtx_config_display();
/* Snapshot current HW stats and clear SW counters for each port. */
1255 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1256 pt_id = fwd_ports_ids[i];
1257 port = &ports[pt_id];
1258 rte_eth_stats_get(pt_id, &port->stats);
1259 port->tx_dropped = 0;
1261 map_port_queue_stats_mapping_registers(pt_id, port);
/* Reset all per-stream counters. */
1263 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1264 fwd_streams[sm_id]->rx_packets = 0;
1265 fwd_streams[sm_id]->tx_packets = 0;
1266 fwd_streams[sm_id]->fwd_dropped = 0;
1267 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1268 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1270 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1271 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1272 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1273 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1274 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1276 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1277 fwd_streams[sm_id]->core_cycles = 0;
/* Optional warm-up: run @with_tx_first single TXONLY bursts (waiting
 * for each to complete) before the real engine starts. */
1280 if (with_tx_first) {
1281 port_fwd_begin = tx_only_engine.port_fwd_begin;
1282 if (port_fwd_begin != NULL) {
1283 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1284 (*port_fwd_begin)(fwd_ports_ids[i]);
1286 while (with_tx_first--) {
1287 launch_packet_forwarding(
1288 run_one_txonly_burst_on_core);
1289 rte_eal_mp_wait_lcore();
1291 port_fwd_end = tx_only_engine.port_fwd_end;
1292 if (port_fwd_end != NULL) {
1293 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1294 (*port_fwd_end)(fwd_ports_ids[i]);
1297 launch_packet_forwarding(start_pkt_forward_on_core);
1301 stop_packet_forwarding(void)
1303 struct rte_eth_stats stats;
1304 struct rte_port *port;
1305 port_fwd_end_t port_fwd_end;
1310 uint64_t total_recv;
1311 uint64_t total_xmit;
1312 uint64_t total_rx_dropped;
1313 uint64_t total_tx_dropped;
1314 uint64_t total_rx_nombuf;
1315 uint64_t tx_dropped;
1316 uint64_t rx_bad_ip_csum;
1317 uint64_t rx_bad_l4_csum;
1318 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1319 uint64_t fwd_cycles;
1322 static const char *acc_stats_border = "+++++++++++++++";
1325 printf("Packet forwarding not started\n");
1328 printf("Telling cores to stop...");
1329 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1330 fwd_lcores[lc_id]->stopped = 1;
1331 printf("\nWaiting for lcores to finish...\n");
1332 rte_eal_mp_wait_lcore();
1333 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1334 if (port_fwd_end != NULL) {
1335 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1336 pt_id = fwd_ports_ids[i];
1337 (*port_fwd_end)(pt_id);
1340 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1343 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1344 if (cur_fwd_config.nb_fwd_streams >
1345 cur_fwd_config.nb_fwd_ports) {
1346 fwd_stream_stats_display(sm_id);
1347 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1348 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1350 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1352 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1355 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1356 tx_dropped = (uint64_t) (tx_dropped +
1357 fwd_streams[sm_id]->fwd_dropped);
1358 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1361 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1362 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1363 fwd_streams[sm_id]->rx_bad_ip_csum);
1364 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1368 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1369 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1370 fwd_streams[sm_id]->rx_bad_l4_csum);
1371 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1374 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1375 fwd_cycles = (uint64_t) (fwd_cycles +
1376 fwd_streams[sm_id]->core_cycles);
1381 total_rx_dropped = 0;
1382 total_tx_dropped = 0;
1383 total_rx_nombuf = 0;
1384 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1385 pt_id = fwd_ports_ids[i];
1387 port = &ports[pt_id];
1388 rte_eth_stats_get(pt_id, &stats);
1389 stats.ipackets -= port->stats.ipackets;
1390 port->stats.ipackets = 0;
1391 stats.opackets -= port->stats.opackets;
1392 port->stats.opackets = 0;
1393 stats.ibytes -= port->stats.ibytes;
1394 port->stats.ibytes = 0;
1395 stats.obytes -= port->stats.obytes;
1396 port->stats.obytes = 0;
1397 stats.imissed -= port->stats.imissed;
1398 port->stats.imissed = 0;
1399 stats.oerrors -= port->stats.oerrors;
1400 port->stats.oerrors = 0;
1401 stats.rx_nombuf -= port->stats.rx_nombuf;
1402 port->stats.rx_nombuf = 0;
1404 total_recv += stats.ipackets;
1405 total_xmit += stats.opackets;
1406 total_rx_dropped += stats.imissed;
1407 total_tx_dropped += port->tx_dropped;
1408 total_rx_nombuf += stats.rx_nombuf;
1410 fwd_port_stats_display(pt_id, &stats);
1413 printf("\n %s Accumulated forward statistics for all ports"
1415 acc_stats_border, acc_stats_border);
1416 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1418 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1420 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1421 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1422 if (total_rx_nombuf > 0)
1423 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1424 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1426 acc_stats_border, acc_stats_border);
1427 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1429 printf("\n CPU cycles/packet=%u (total cycles="
1430 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1431 (unsigned int)(fwd_cycles / total_recv),
1432 fwd_cycles, total_recv);
1434 printf("\nDone.\n");
1439 dev_set_link_up(portid_t pid)
1441 if (rte_eth_dev_set_link_up(pid) < 0)
1442 printf("\nSet link up fail.\n");
1446 dev_set_link_down(portid_t pid)
1448 if (rte_eth_dev_set_link_down(pid) < 0)
1449 printf("\nSet link down fail.\n");
1453 all_ports_started(void)
1456 struct rte_port *port;
1458 RTE_ETH_FOREACH_DEV(pi) {
1460 /* Check if there is a port which is not started */
1461 if ((port->port_status != RTE_PORT_STARTED) &&
1462 (port->slave_flag == 0))
1466 /* No port is not started */
1471 port_is_stopped(portid_t port_id)
1473 struct rte_port *port = &ports[port_id];
1475 if ((port->port_status != RTE_PORT_STOPPED) &&
1476 (port->slave_flag == 0))
1482 all_ports_stopped(void)
1486 RTE_ETH_FOREACH_DEV(pi) {
1487 if (!port_is_stopped(pi))
1495 port_is_started(portid_t port_id)
1497 if (port_id_is_invalid(port_id, ENABLED_WARN))
1500 if (ports[port_id].port_status != RTE_PORT_STARTED)
1507 port_is_closed(portid_t port_id)
1509 if (port_id_is_invalid(port_id, ENABLED_WARN))
1512 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1519 start_port(portid_t pid)
1521 int diag, need_check_link_status = -1;
1524 struct rte_port *port;
1525 struct ether_addr mac_addr;
1526 enum rte_eth_event_type event_type;
1528 if (port_id_is_invalid(pid, ENABLED_WARN))
1533 RTE_ETH_FOREACH_DEV(pi) {
1534 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1537 need_check_link_status = 0;
1539 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1540 RTE_PORT_HANDLING) == 0) {
1541 printf("Port %d is now not stopped\n", pi);
1545 if (port->need_reconfig > 0) {
1546 port->need_reconfig = 0;
1548 if (flow_isolate_all) {
1549 int ret = port_flow_isolate(pi, 1);
1551 printf("Failed to apply isolated"
1552 " mode on port %d\n", pi);
1557 printf("Configuring Port %d (socket %u)\n", pi,
1559 /* configure port */
1560 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1563 if (rte_atomic16_cmpset(&(port->port_status),
1564 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1565 printf("Port %d can not be set back "
1566 "to stopped\n", pi);
1567 printf("Fail to configure port %d\n", pi);
1568 /* try to reconfigure port next time */
1569 port->need_reconfig = 1;
1573 if (port->need_reconfig_queues > 0) {
1574 port->need_reconfig_queues = 0;
1575 port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
1576 /* Apply Tx offloads configuration */
1577 port->tx_conf.offloads = port->dev_conf.txmode.offloads;
1578 /* setup tx queues */
1579 for (qi = 0; qi < nb_txq; qi++) {
1580 if ((numa_support) &&
1581 (txring_numa[pi] != NUMA_NO_CONFIG))
1582 diag = rte_eth_tx_queue_setup(pi, qi,
1583 nb_txd,txring_numa[pi],
1586 diag = rte_eth_tx_queue_setup(pi, qi,
1587 nb_txd,port->socket_id,
1593 /* Fail to setup tx queue, return */
1594 if (rte_atomic16_cmpset(&(port->port_status),
1596 RTE_PORT_STOPPED) == 0)
1597 printf("Port %d can not be set back "
1598 "to stopped\n", pi);
1599 printf("Fail to configure port %d tx queues\n", pi);
1600 /* try to reconfigure queues next time */
1601 port->need_reconfig_queues = 1;
1604 /* Apply Rx offloads configuration */
1605 port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1606 /* setup rx queues */
1607 for (qi = 0; qi < nb_rxq; qi++) {
1608 if ((numa_support) &&
1609 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1610 struct rte_mempool * mp =
1611 mbuf_pool_find(rxring_numa[pi]);
1613 printf("Failed to setup RX queue:"
1614 "No mempool allocation"
1615 " on the socket %d\n",
1620 diag = rte_eth_rx_queue_setup(pi, qi,
1621 nb_rxd,rxring_numa[pi],
1622 &(port->rx_conf),mp);
1624 struct rte_mempool *mp =
1625 mbuf_pool_find(port->socket_id);
1627 printf("Failed to setup RX queue:"
1628 "No mempool allocation"
1629 " on the socket %d\n",
1633 diag = rte_eth_rx_queue_setup(pi, qi,
1634 nb_rxd,port->socket_id,
1635 &(port->rx_conf), mp);
1640 /* Fail to setup rx queue, return */
1641 if (rte_atomic16_cmpset(&(port->port_status),
1643 RTE_PORT_STOPPED) == 0)
1644 printf("Port %d can not be set back "
1645 "to stopped\n", pi);
1646 printf("Fail to configure port %d rx queues\n", pi);
1647 /* try to reconfigure queues next time */
1648 port->need_reconfig_queues = 1;
1654 if (rte_eth_dev_start(pi) < 0) {
1655 printf("Fail to start port %d\n", pi);
1657 /* Fail to setup rx queue, return */
1658 if (rte_atomic16_cmpset(&(port->port_status),
1659 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1660 printf("Port %d can not be set back to "
1665 if (rte_atomic16_cmpset(&(port->port_status),
1666 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1667 printf("Port %d can not be set into started\n", pi);
1669 rte_eth_macaddr_get(pi, &mac_addr);
1670 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1671 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1672 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1673 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1675 /* at least one port started, need checking link status */
1676 need_check_link_status = 1;
1679 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1680 event_type < RTE_ETH_EVENT_MAX;
1682 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1687 printf("Failed to setup even callback for event %d\n",
1693 if (need_check_link_status == 1 && !no_link_check)
1694 check_all_ports_link_status(RTE_PORT_ALL);
1695 else if (need_check_link_status == 0)
1696 printf("Please stop the ports first\n");
1703 stop_port(portid_t pid)
1706 struct rte_port *port;
1707 int need_check_link_status = 0;
1714 if (port_id_is_invalid(pid, ENABLED_WARN))
1717 printf("Stopping ports...\n");
1719 RTE_ETH_FOREACH_DEV(pi) {
1720 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1723 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1724 printf("Please remove port %d from forwarding configuration.\n", pi);
1728 if (port_is_bonding_slave(pi)) {
1729 printf("Please remove port %d from bonded device.\n", pi);
1734 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1735 RTE_PORT_HANDLING) == 0)
1738 rte_eth_dev_stop(pi);
1740 if (rte_atomic16_cmpset(&(port->port_status),
1741 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1742 printf("Port %d can not be set into stopped\n", pi);
1743 need_check_link_status = 1;
1745 if (need_check_link_status && !no_link_check)
1746 check_all_ports_link_status(RTE_PORT_ALL);
1752 close_port(portid_t pid)
1755 struct rte_port *port;
1757 if (port_id_is_invalid(pid, ENABLED_WARN))
1760 printf("Closing ports...\n");
1762 RTE_ETH_FOREACH_DEV(pi) {
1763 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1766 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1767 printf("Please remove port %d from forwarding configuration.\n", pi);
1771 if (port_is_bonding_slave(pi)) {
1772 printf("Please remove port %d from bonded device.\n", pi);
1777 if (rte_atomic16_cmpset(&(port->port_status),
1778 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1779 printf("Port %d is already closed\n", pi);
1783 if (rte_atomic16_cmpset(&(port->port_status),
1784 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1785 printf("Port %d is now not stopped\n", pi);
1789 if (port->flow_list)
1790 port_flow_flush(pi);
1791 rte_eth_dev_close(pi);
1793 if (rte_atomic16_cmpset(&(port->port_status),
1794 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1795 printf("Port %d cannot be set to closed\n", pi);
1802 reset_port(portid_t pid)
1806 struct rte_port *port;
1808 if (port_id_is_invalid(pid, ENABLED_WARN))
1811 printf("Resetting ports...\n");
1813 RTE_ETH_FOREACH_DEV(pi) {
1814 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1817 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1818 printf("Please remove port %d from forwarding "
1819 "configuration.\n", pi);
1823 if (port_is_bonding_slave(pi)) {
1824 printf("Please remove port %d from bonded device.\n",
1829 diag = rte_eth_dev_reset(pi);
1832 port->need_reconfig = 1;
1833 port->need_reconfig_queues = 1;
1835 printf("Failed to reset port %d. diag=%d\n", pi, diag);
1843 attach_port(char *identifier)
1846 unsigned int socket_id;
1848 printf("Attaching a new port...\n");
1850 if (identifier == NULL) {
1851 printf("Invalid parameters are specified\n");
1855 if (rte_eth_dev_attach(identifier, &pi))
1858 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1859 /* if socket_id is invalid, set to 0 */
1860 if (check_socket_id(socket_id) < 0)
1862 reconfig(pi, socket_id);
1863 rte_eth_promiscuous_enable(pi);
1865 nb_ports = rte_eth_dev_count();
1867 ports[pi].port_status = RTE_PORT_STOPPED;
1869 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1874 detach_port(portid_t port_id)
1876 char name[RTE_ETH_NAME_MAX_LEN];
1878 printf("Detaching a port...\n");
1880 if (!port_is_closed(port_id)) {
1881 printf("Please close port first\n");
1885 if (ports[port_id].flow_list)
1886 port_flow_flush(port_id);
1888 if (rte_eth_dev_detach(port_id, name)) {
1889 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1893 nb_ports = rte_eth_dev_count();
1895 printf("Port '%s' is detached. Now total ports is %d\n",
1907 stop_packet_forwarding();
1909 if (ports != NULL) {
1911 RTE_ETH_FOREACH_DEV(pt_id) {
1912 printf("\nShutting down port %d...\n", pt_id);
1918 printf("\nBye...\n");
typedef void (*cmd_func_t)(void);

/* One entry of the PMD self-test menu: a command name and the
 * parameterless function that implements it.
 */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in the test menu table (defined elsewhere). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1929 /* Check the link status of all ports in up to 9s, and print them finally */
1931 check_all_ports_link_status(uint32_t port_mask)
1933 #define CHECK_INTERVAL 100 /* 100ms */
1934 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1936 uint8_t count, all_ports_up, print_flag = 0;
1937 struct rte_eth_link link;
1939 printf("Checking link statuses...\n");
1941 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1943 RTE_ETH_FOREACH_DEV(portid) {
1944 if ((port_mask & (1 << portid)) == 0)
1946 memset(&link, 0, sizeof(link));
1947 rte_eth_link_get_nowait(portid, &link);
1948 /* print link status if flag set */
1949 if (print_flag == 1) {
1950 if (link.link_status)
1952 "Port%d Link Up. speed %u Mbps- %s\n",
1953 portid, link.link_speed,
1954 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1955 ("full-duplex") : ("half-duplex\n"));
1957 printf("Port %d Link Down\n", portid);
1960 /* clear all_ports_up flag if any link down */
1961 if (link.link_status == ETH_LINK_DOWN) {
1966 /* after finally printing all link status, get out */
1967 if (print_flag == 1)
1970 if (all_ports_up == 0) {
1972 rte_delay_ms(CHECK_INTERVAL);
1975 /* set the print_flag if all ports up or timeout */
1976 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1986 rmv_event_callback(void *arg)
1988 struct rte_eth_dev *dev;
1989 portid_t port_id = (intptr_t)arg;
1991 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1992 dev = &rte_eth_devices[port_id];
1995 close_port(port_id);
1996 printf("removing device %s\n", dev->device->name);
1997 if (rte_eal_dev_detach(dev->device))
1998 TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2002 /* This function is used by the interrupt thread */
2004 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2007 static const char * const event_desc[] = {
2008 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2009 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2010 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2011 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2012 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2013 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2014 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2015 [RTE_ETH_EVENT_NEW] = "device probed",
2016 [RTE_ETH_EVENT_DESTROY] = "device released",
2017 [RTE_ETH_EVENT_MAX] = NULL,
2020 RTE_SET_USED(param);
2021 RTE_SET_USED(ret_param);
2023 if (type >= RTE_ETH_EVENT_MAX) {
2024 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
2025 port_id, __func__, type);
2027 } else if (event_print_mask & (UINT32_C(1) << type)) {
2028 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2033 if (port_id_is_invalid(port_id, DISABLED_WARN))
2037 case RTE_ETH_EVENT_INTR_RMV:
2038 if (rte_eal_alarm_set(100000,
2039 rmv_event_callback, (void *)(intptr_t)port_id))
2040 fprintf(stderr, "Could not set up deferred device removal\n");
2049 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2053 uint8_t mapping_found = 0;
2055 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2056 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2057 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2058 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2059 tx_queue_stats_mappings[i].queue_id,
2060 tx_queue_stats_mappings[i].stats_counter_id);
2067 port->tx_queue_stats_mapping_enabled = 1;
2072 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2076 uint8_t mapping_found = 0;
2078 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2079 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2080 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2081 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2082 rx_queue_stats_mappings[i].queue_id,
2083 rx_queue_stats_mappings[i].stats_counter_id);
2090 port->rx_queue_stats_mapping_enabled = 1;
2095 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2099 diag = set_tx_queue_stats_mapping_registers(pi, port);
2101 if (diag == -ENOTSUP) {
2102 port->tx_queue_stats_mapping_enabled = 0;
2103 printf("TX queue stats mapping not supported port id=%d\n", pi);
2106 rte_exit(EXIT_FAILURE,
2107 "set_tx_queue_stats_mapping_registers "
2108 "failed for port id=%d diag=%d\n",
2112 diag = set_rx_queue_stats_mapping_registers(pi, port);
2114 if (diag == -ENOTSUP) {
2115 port->rx_queue_stats_mapping_enabled = 0;
2116 printf("RX queue stats mapping not supported port id=%d\n", pi);
2119 rte_exit(EXIT_FAILURE,
2120 "set_rx_queue_stats_mapping_registers "
2121 "failed for port id=%d diag=%d\n",
2127 rxtx_port_config(struct rte_port *port)
2129 port->rx_conf = port->dev_info.default_rxconf;
2130 port->tx_conf = port->dev_info.default_txconf;
2132 /* Check if any RX/TX parameters have been passed */
2133 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2134 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2136 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2137 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2139 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2140 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2142 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2143 port->rx_conf.rx_free_thresh = rx_free_thresh;
2145 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2146 port->rx_conf.rx_drop_en = rx_drop_en;
2148 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2149 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2151 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2152 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2154 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2155 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2157 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2158 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2160 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2161 port->tx_conf.tx_free_thresh = tx_free_thresh;
2165 init_port_config(void)
2168 struct rte_port *port;
2170 RTE_ETH_FOREACH_DEV(pid) {
2172 port->dev_conf.fdir_conf = fdir_conf;
2174 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2175 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2177 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2178 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2181 if (port->dcb_flag == 0) {
2182 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2183 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2185 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2188 rxtx_port_config(port);
2190 rte_eth_macaddr_get(pid, &port->eth_addr);
2192 map_port_queue_stats_mapping_registers(pid, port);
2193 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2194 rte_pmd_ixgbe_bypass_init(pid);
2197 if (lsc_interrupt &&
2198 (rte_eth_devices[pid].data->dev_flags &
2199 RTE_ETH_DEV_INTR_LSC))
2200 port->dev_conf.intr_conf.lsc = 1;
2201 if (rmv_interrupt &&
2202 (rte_eth_devices[pid].data->dev_flags &
2203 RTE_ETH_DEV_INTR_RMV))
2204 port->dev_conf.intr_conf.rmv = 1;
2206 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2207 /* Detect softnic port */
2208 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2209 port->softnic_enable = 1;
2210 memset(&port->softport, 0, sizeof(struct softnic_port));
2212 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2213 port->softport.tm_flag = 1;
2219 void set_port_slave_flag(portid_t slave_pid)
2221 struct rte_port *port;
2223 port = &ports[slave_pid];
2224 port->slave_flag = 1;
2227 void clear_port_slave_flag(portid_t slave_pid)
2229 struct rte_port *port;
2231 port = &ports[slave_pid];
2232 port->slave_flag = 0;
2235 uint8_t port_is_bonding_slave(portid_t slave_pid)
2237 struct rte_port *port;
2239 port = &ports[slave_pid];
2240 return port->slave_flag;
/* VLAN ids used to populate the VMDq+DCB pool map (one per pool). */
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
2251 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2252 enum dcb_mode_enable dcb_mode,
2253 enum rte_eth_nb_tcs num_tcs,
2259 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2260 * given above, and the number of traffic classes available for use.
2262 if (dcb_mode == DCB_VT_ENABLED) {
2263 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2264 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2265 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2266 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2268 /* VMDQ+DCB RX and TX configurations */
2269 vmdq_rx_conf->enable_default_pool = 0;
2270 vmdq_rx_conf->default_pool = 0;
2271 vmdq_rx_conf->nb_queue_pools =
2272 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2273 vmdq_tx_conf->nb_queue_pools =
2274 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2276 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2277 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2278 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2279 vmdq_rx_conf->pool_map[i].pools =
2280 1 << (i % vmdq_rx_conf->nb_queue_pools);
2282 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2283 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2284 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2287 /* set DCB mode of RX and TX of multiple queues */
2288 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2289 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2291 struct rte_eth_dcb_rx_conf *rx_conf =
2292 ð_conf->rx_adv_conf.dcb_rx_conf;
2293 struct rte_eth_dcb_tx_conf *tx_conf =
2294 ð_conf->tx_adv_conf.dcb_tx_conf;
2296 rx_conf->nb_tcs = num_tcs;
2297 tx_conf->nb_tcs = num_tcs;
2299 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2300 rx_conf->dcb_tc[i] = i % num_tcs;
2301 tx_conf->dcb_tc[i] = i % num_tcs;
2303 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2304 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2305 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2309 eth_conf->dcb_capability_en =
2310 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2312 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2318 init_port_dcb_config(portid_t pid,
2319 enum dcb_mode_enable dcb_mode,
2320 enum rte_eth_nb_tcs num_tcs,
2323 struct rte_eth_conf port_conf;
2324 struct rte_port *rte_port;
2328 rte_port = &ports[pid];
2330 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2331 /* Enter DCB configuration status */
2334 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2335 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2338 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2341 * Write the configuration into the device.
2342 * Set the numbers of RX & TX queues to 0, so
2343 * the RX & TX queues will not be setup.
2345 rte_eth_dev_configure(pid, 0, 0, &port_conf);
2347 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2349 /* If dev_info.vmdq_pool_base is greater than 0,
2350 * the queue id of vmdq pools is started after pf queues.
2352 if (dcb_mode == DCB_VT_ENABLED &&
2353 rte_port->dev_info.vmdq_pool_base > 0) {
2354 printf("VMDQ_DCB multi-queue mode is nonsensical"
2355 " for port %d.", pid);
2359 /* Assume the ports in testpmd have the same dcb capability
2360 * and has the same number of rxq and txq in dcb mode
2362 if (dcb_mode == DCB_VT_ENABLED) {
2363 if (rte_port->dev_info.max_vfs > 0) {
2364 nb_rxq = rte_port->dev_info.nb_rx_queues;
2365 nb_txq = rte_port->dev_info.nb_tx_queues;
2367 nb_rxq = rte_port->dev_info.max_rx_queues;
2368 nb_txq = rte_port->dev_info.max_tx_queues;
2371 /*if vt is disabled, use all pf queues */
2372 if (rte_port->dev_info.vmdq_pool_base == 0) {
2373 nb_rxq = rte_port->dev_info.max_rx_queues;
2374 nb_txq = rte_port->dev_info.max_tx_queues;
2376 nb_rxq = (queueid_t)num_tcs;
2377 nb_txq = (queueid_t)num_tcs;
2381 rx_free_thresh = 64;
2383 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2385 rxtx_port_config(rte_port);
2387 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2388 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2389 rx_vft_set(pid, vlan_tags[i], 1);
2391 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2392 map_port_queue_stats_mapping_registers(pid, rte_port);
2394 rte_port->dcb_flag = 1;
2402 /* Configuration of Ethernet ports. */
2403 ports = rte_zmalloc("testpmd: ports",
2404 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2405 RTE_CACHE_LINE_SIZE);
2406 if (ports == NULL) {
2407 rte_exit(EXIT_FAILURE,
2408 "rte_zmalloc(%d struct rte_port) failed\n",
2424 const char clr[] = { 27, '[', '2', 'J', '\0' };
2425 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2427 /* Clear screen and move to top left */
2428 printf("%s%s", clr, top_left);
2430 printf("\nPort statistics ====================================");
2431 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2432 nic_stats_display(fwd_ports_ids[i]);
2436 signal_handler(int signum)
2438 if (signum == SIGINT || signum == SIGTERM) {
2439 printf("\nSignal %d received, preparing to exit...\n",
2441 #ifdef RTE_LIBRTE_PDUMP
2442 /* uninitialize packet capture framework */
2445 #ifdef RTE_LIBRTE_LATENCY_STATS
2446 rte_latencystats_uninit();
2449 /* Set flag to indicate the force termination. */
2451 /* exit with the expected status */
2452 signal(signum, SIG_DFL);
2453 kill(getpid(), signum);
2458 main(int argc, char** argv)
2463 signal(SIGINT, signal_handler);
2464 signal(SIGTERM, signal_handler);
2466 diag = rte_eal_init(argc, argv);
2468 rte_panic("Cannot init EAL\n");
2470 testpmd_logtype = rte_log_register("testpmd");
2471 if (testpmd_logtype < 0)
2472 rte_panic("Cannot register log type");
2473 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2475 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2476 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2480 #ifdef RTE_LIBRTE_PDUMP
2481 /* initialize packet capture framework */
2482 rte_pdump_init(NULL);
2485 nb_ports = (portid_t) rte_eth_dev_count();
2487 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2489 /* allocate port structures, and init them */
2492 set_def_fwd_config();
2494 rte_panic("Empty set of forwarding logical cores - check the "
2495 "core mask supplied in the command parameters\n");
2497 /* Bitrate/latency stats disabled by default */
2498 #ifdef RTE_LIBRTE_BITRATE
2499 bitrate_enabled = 0;
2501 #ifdef RTE_LIBRTE_LATENCY_STATS
2502 latencystats_enabled = 0;
2508 launch_args_parse(argc, argv);
2510 if (tx_first && interactive)
2511 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2512 "interactive mode.\n");
2514 if (tx_first && lsc_interrupt) {
2515 printf("Warning: lsc_interrupt needs to be off when "
2516 " using tx_first. Disabling.\n");
2520 if (!nb_rxq && !nb_txq)
2521 printf("Warning: Either rx or tx queues should be non-zero\n");
2523 if (nb_rxq > 1 && nb_rxq > nb_txq)
2524 printf("Warning: nb_rxq=%d enables RSS configuration, "
2525 "but nb_txq=%d will prevent to fully test it.\n",
2529 if (start_port(RTE_PORT_ALL) != 0)
2530 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2532 /* set all ports to promiscuous mode by default */
2533 RTE_ETH_FOREACH_DEV(port_id)
2534 rte_eth_promiscuous_enable(port_id);
2536 /* Init metrics library */
2537 rte_metrics_init(rte_socket_id());
2539 #ifdef RTE_LIBRTE_LATENCY_STATS
2540 if (latencystats_enabled != 0) {
2541 int ret = rte_latencystats_init(1, NULL);
2543 printf("Warning: latencystats init()"
2544 " returned error %d\n", ret);
2545 printf("Latencystats running on lcore %d\n",
2546 latencystats_lcore_id);
2550 /* Setup bitrate stats */
2551 #ifdef RTE_LIBRTE_BITRATE
2552 if (bitrate_enabled != 0) {
2553 bitrate_data = rte_stats_bitrate_create();
2554 if (bitrate_data == NULL)
2555 rte_exit(EXIT_FAILURE,
2556 "Could not allocate bitrate data.\n");
2557 rte_stats_bitrate_reg(bitrate_data);
2561 #ifdef RTE_LIBRTE_CMDLINE
2562 if (strlen(cmdline_filename) != 0)
2563 cmdline_read_from_file(cmdline_filename);
2565 if (interactive == 1) {
2567 printf("Start automatic packet forwarding\n");
2568 start_packet_forwarding(0);
2580 printf("No commandline core given, start packet forwarding\n");
2581 start_packet_forwarding(tx_first);
2582 if (stats_period != 0) {
2583 uint64_t prev_time = 0, cur_time, diff_time = 0;
2584 uint64_t timer_period;
2586 /* Convert to number of cycles */
2587 timer_period = stats_period * rte_get_timer_hz();
2589 while (f_quit == 0) {
2590 cur_time = rte_get_timer_cycles();
2591 diff_time += cur_time - prev_time;
2593 if (diff_time >= timer_period) {
2595 /* Reset the timer */
2598 /* Sleep to avoid unnecessary checks */
2599 prev_time = cur_time;
2604 printf("Press enter to exit\n");
2605 rc = read(0, &c, 1);