1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
66 uint16_t verbose_level = 0; /**< Silent by default. */
67 int testpmd_logtype; /**< Log type for testpmd logs */
69 /* Use the master core for the command line? */
70 uint8_t interactive = 0;
71 uint8_t auto_start = 0;
73 char cmdline_filename[PATH_MAX] = {0};
76 * NUMA support configuration.
77 * When set, the NUMA support attempts to dispatch the allocation of the
78 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
79 * probed ports among the available CPU sockets.
80 * Otherwise, all memory is allocated from CPU socket 0.
82 uint8_t numa_support = 1; /**< numa enabled by default */
85 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
88 uint8_t socket_num = UMA_NO_CONFIG;
91 * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
96 * Store the specified sockets on which the memory pools to be used by the ports are allocated
99 uint8_t port_numa[RTE_MAX_ETHPORTS];
102 * Store the specified sockets on which the RX rings to be used by the ports are allocated
105 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
108 * Store the specified sockets on which the TX rings to be used by the ports are allocated
111 uint8_t txring_numa[RTE_MAX_ETHPORTS];
114 * Record the Ethernet address of peer target ports to which packets are
116 * Must be instantiated with the ethernet addresses of peer traffic generator
119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120 portid_t nb_peer_eth_addrs = 0;
123 * Probed Target Environment.
125 struct rte_port *ports; /**< For all probed ethernet ports. */
126 portid_t nb_ports; /**< Number of probed ethernet ports. */
127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
131 * Test Forwarding Configuration.
132 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
133 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
137 portid_t nb_cfg_ports; /**< Number of configured ports. */
138 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
143 struct fwd_stream **fwd_streams; /**< For each queue of each port. */
144 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
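/*
 * Illustrative example (not from the original source): with 2 probed
 * ports, nb_rxq = 4 and nb_txq = 2, init_fwd_streams() allocates
 * 2 * RTE_MAX(4, 2) = 8 forwarding streams.
 */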
147 * Forwarding engines.
149 struct fwd_engine *fwd_engines[] = {
158 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
160 &softnic_tm_bypass_engine,
162 #ifdef RTE_LIBRTE_IEEE1588
163 &ieee1588_fwd_engine,
168 struct fwd_config cur_fwd_config;
169 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
170 uint32_t retry_enabled;
171 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
172 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
174 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
175 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
176 * specified on command-line. */
177 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
180 * In a container, a process running with the 'stats-period' option cannot be
181 * terminated. Set a flag to exit the stats-period loop after SIGINT/SIGTERM.
186 * Configuration of packet segments used by the "txonly" processing engine.
188 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
189 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
190 TXONLY_DEF_PACKET_LEN,
192 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
194 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
195 /**< Split policy for packets to TX. */
197 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
198 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
200 /* Whether the current configuration is in DCB mode; 0 means it is not */
201 uint8_t dcb_config = 0;
203 /* Whether DCB is in testing status */
204 uint8_t dcb_test = 0;
207 * Configurable number of RX/TX queues.
209 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
210 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
213 * Configurable number of RX/TX ring descriptors.
214 * Defaults are supplied by drivers via ethdev.
216 #define RTE_TEST_RX_DESC_DEFAULT 0
217 #define RTE_TEST_TX_DESC_DEFAULT 0
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
221 #define RTE_PMD_PARAM_UNSET -1
223 * Configurable values of RX and TX ring threshold registers.
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
235 * Configurable value of RX free threshold.
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
240 * Configurable value of RX drop enable.
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
245 * Configurable value of TX free threshold.
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
250 * Configurable value of TX RS bit threshold.
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
255 * Receive Side Scaling (RSS) configuration.
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
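/*
 * ETH_RSS_IP is a convenience mask combining the IPv4 and IPv6 RSS hash
 * types, so both IPv4 and IPv6 traffic is spread across RX queues by
 * default.
 */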
260 * Port topology configuration
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
265 * Avoid flushing all the RX streams before starting forwarding.
267 uint8_t no_flush_rx = 0; /* flush by default */
270 * Flow API isolated mode.
272 uint8_t flow_isolate_all;
275 * Avoid checking the link status when starting/stopping a port.
277 uint8_t no_link_check = 0; /* check by default */
280 * Enable link status change notification
282 uint8_t lsc_interrupt = 1; /* enabled by default */
285 * Enable device removal notification.
287 uint8_t rmv_interrupt = 1; /* enabled by default */
289 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
292 * Display or mask ether events
293 * Default to all events except VF_MBOX
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
300 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
301 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
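/*
 * Illustrative sketch (not part of the original file): to display VF
 * mailbox events as well, the corresponding bit would be OR-ed in:
 *
 *     event_print_mask |= UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX;
 */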
304 * NIC bypass mode configuration options.
307 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
308 /* The NIC bypass watchdog timeout. */
309 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
313 #ifdef RTE_LIBRTE_LATENCY_STATS
316 * Set when latency stats are enabled on the command line
318 uint8_t latencystats_enabled;
321 * Lcore ID to serve latency statistics.
323 lcoreid_t latencystats_lcore_id = -1;
328 * Ethernet device configuration.
330 struct rte_eth_rxmode rx_mode = {
331 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
332 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
333 .ignore_offload_bitfield = 1,
336 struct rte_eth_txmode tx_mode = {
337 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
340 struct rte_fdir_conf fdir_conf = {
341 .mode = RTE_FDIR_MODE_NONE,
342 .pballoc = RTE_FDIR_PBALLOC_64K,
343 .status = RTE_FDIR_REPORT_STATUS,
345 .vlan_tci_mask = 0x0,
347 .src_ip = 0xFFFFFFFF,
348 .dst_ip = 0xFFFFFFFF,
351 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
352 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
354 .src_port_mask = 0xFFFF,
355 .dst_port_mask = 0xFFFF,
356 .mac_addr_byte_mask = 0xFF,
357 .tunnel_type_mask = 1,
358 .tunnel_id_mask = 0xFFFFFFFF,
363 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
365 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
366 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
368 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
369 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
371 uint16_t nb_tx_queue_stats_mappings = 0;
372 uint16_t nb_rx_queue_stats_mappings = 0;
375 * Display zero values by default for xstats
377 uint8_t xstats_hide_zero;
379 unsigned int num_sockets = 0;
380 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
382 #ifdef RTE_LIBRTE_BITRATE
383 /* Bitrate statistics */
384 struct rte_stats_bitrates *bitrate_data;
385 lcoreid_t bitrate_lcore_id;
386 uint8_t bitrate_enabled;
389 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
390 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
392 /* Forward function declarations */
393 static void map_port_queue_stats_mapping_registers(portid_t pi,
394 struct rte_port *port);
395 static void check_all_ports_link_status(uint32_t port_mask);
396 static int eth_event_callback(portid_t port_id,
397 enum rte_eth_event_type type,
398 void *param, void *ret_param);
399 static void eth_dev_event_callback(char *device_name,
400 enum rte_dev_event_type type,
402 static int eth_dev_event_callback_register(void);
403 static int eth_dev_event_callback_unregister(void);
407 * Check if all the ports are started.
408 * If yes, return positive value. If not, return zero.
410 static int all_ports_started(void);
412 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
413 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
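/*
 * With the standard values ETHER_MAX_LEN = 1518 and ETHER_CRC_LEN = 4,
 * the default GSO segment size is 1514 bytes.
 */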
416 * Helper function to check whether a socket id is newly discovered.
417 * If it is new, return a positive value. If already known, return zero.
420 new_socket_id(unsigned int socket_id)
424 for (i = 0; i < num_sockets; i++) {
425 if (socket_ids[i] == socket_id)
432 * Setup default configuration.
435 set_default_fwd_lcores_config(void)
439 unsigned int sock_num;
442 for (i = 0; i < RTE_MAX_LCORE; i++) {
443 sock_num = rte_lcore_to_socket_id(i);
444 if (new_socket_id(sock_num)) {
445 if (num_sockets >= RTE_MAX_NUMA_NODES) {
446 rte_exit(EXIT_FAILURE,
447 "Total sockets greater than %u\n",
450 socket_ids[num_sockets++] = sock_num;
452 if (!rte_lcore_is_enabled(i))
454 if (i == rte_get_master_lcore())
456 fwd_lcores_cpuids[nb_lc++] = i;
458 nb_lcores = (lcoreid_t) nb_lc;
459 nb_cfg_lcores = nb_lcores;
464 set_def_peer_eth_addrs(void)
468 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
469 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
470 peer_eth_addrs[i].addr_bytes[5] = i;
475 set_default_fwd_ports_config(void)
480 RTE_ETH_FOREACH_DEV(pt_id)
481 fwd_ports_ids[i++] = pt_id;
483 nb_cfg_ports = nb_ports;
484 nb_fwd_ports = nb_ports;
488 set_def_fwd_config(void)
490 set_default_fwd_lcores_config();
491 set_def_peer_eth_addrs();
492 set_default_fwd_ports_config();
496 * Configuration initialisation done once at init time.
499 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
500 unsigned int socket_id)
502 char pool_name[RTE_MEMPOOL_NAMESIZE];
503 struct rte_mempool *rte_mp = NULL;
506 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
507 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
510 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
511 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
514 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
515 mb_size, (unsigned) mb_mempool_cache,
516 sizeof(struct rte_pktmbuf_pool_private),
521 if (rte_mempool_populate_anon(rte_mp) == 0) {
522 rte_mempool_free(rte_mp);
526 rte_pktmbuf_pool_init(rte_mp, NULL);
527 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
529 /* wrapper to rte_mempool_create() */
530 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
531 rte_mbuf_best_mempool_ops());
532 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
533 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
537 if (rte_mp == NULL) {
538 rte_exit(EXIT_FAILURE,
539 "Creation of mbuf pool for socket %u failed: %s\n",
540 socket_id, rte_strerror(rte_errno));
541 } else if (verbose_level > 0) {
542 rte_mempool_dump(stdout, rte_mp);
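/*
 * Hypothetical usage sketch (not part of the original file): creating a
 * pool of 8192 default-sized mbufs on socket 0 would look like
 *
 *     mbuf_pool_create(DEFAULT_MBUF_DATA_SIZE, 8192, 0);
 *
 * where the per-object size is sizeof(struct rte_mbuf) plus the
 * requested data room, as computed in mb_size above.
 */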
547 * Check whether the given socket id is valid in NUMA mode;
548 * if valid, return 0, else return -1
551 check_socket_id(const unsigned int socket_id)
553 static int warning_once = 0;
555 if (new_socket_id(socket_id)) {
556 if (!warning_once && numa_support)
557 printf("Warning: NUMA should be configured manually by"
558 " using --port-numa-config and"
559 " --ring-numa-config parameters along with"
568 * Get the allowed maximum number of RX queues.
569 * *pid returns the port id which has the minimal value of
570 * max_rx_queues among all ports.
573 get_allowed_max_nb_rxq(portid_t *pid)
575 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
577 struct rte_eth_dev_info dev_info;
579 RTE_ETH_FOREACH_DEV(pi) {
580 rte_eth_dev_info_get(pi, &dev_info);
581 if (dev_info.max_rx_queues < allowed_max_rxq) {
582 allowed_max_rxq = dev_info.max_rx_queues;
586 return allowed_max_rxq;
590 * Check whether the input rxq is valid.
591 * The input rxq is valid if it is not greater than the maximum number
592 * of RX queues of any port.
593 * If valid, return 0, else return -1.
596 check_nb_rxq(queueid_t rxq)
598 queueid_t allowed_max_rxq;
601 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
602 if (rxq > allowed_max_rxq) {
603 printf("Fail: input rxq (%u) can't be greater "
604 "than max_rx_queues (%u) of port %u\n",
614 * Get the allowed maximum number of TX queues.
615 * *pid returns the port id which has the minimal value of
616 * max_tx_queues among all ports.
619 get_allowed_max_nb_txq(portid_t *pid)
621 queueid_t allowed_max_txq = MAX_QUEUE_ID;
623 struct rte_eth_dev_info dev_info;
625 RTE_ETH_FOREACH_DEV(pi) {
626 rte_eth_dev_info_get(pi, &dev_info);
627 if (dev_info.max_tx_queues < allowed_max_txq) {
628 allowed_max_txq = dev_info.max_tx_queues;
632 return allowed_max_txq;
636 * Check whether the input txq is valid.
637 * The input txq is valid if it is not greater than the maximum number
638 * of TX queues of any port.
639 * If valid, return 0, else return -1.
642 check_nb_txq(queueid_t txq)
644 queueid_t allowed_max_txq;
647 allowed_max_txq = get_allowed_max_nb_txq(&pid);
648 if (txq > allowed_max_txq) {
649 printf("Fail: input txq (%u) can't be greater "
650 "than max_tx_queues (%u) of port %u\n",
663 struct rte_port *port;
664 struct rte_mempool *mbp;
665 unsigned int nb_mbuf_per_pool;
667 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
668 struct rte_gro_param gro_param;
671 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
674 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
675 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
676 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
679 /* Configuration of logical cores. */
680 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
681 sizeof(struct fwd_lcore *) * nb_lcores,
682 RTE_CACHE_LINE_SIZE);
683 if (fwd_lcores == NULL) {
684 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
685 "failed\n", nb_lcores);
687 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
688 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
689 sizeof(struct fwd_lcore),
690 RTE_CACHE_LINE_SIZE);
691 if (fwd_lcores[lc_id] == NULL) {
692 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
695 fwd_lcores[lc_id]->cpuid_idx = lc_id;
698 RTE_ETH_FOREACH_DEV(pid) {
700 /* Apply default TxRx configuration for all ports */
701 port->dev_conf.txmode = tx_mode;
702 port->dev_conf.rxmode = rx_mode;
703 rte_eth_dev_info_get(pid, &port->dev_info);
704 if (!(port->dev_info.tx_offload_capa &
705 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
706 port->dev_conf.txmode.offloads &=
707 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
709 if (port_numa[pid] != NUMA_NO_CONFIG)
710 port_per_socket[port_numa[pid]]++;
712 uint32_t socket_id = rte_eth_dev_socket_id(pid);
714 /* if socket_id is invalid, set to 0 */
715 if (check_socket_id(socket_id) < 0)
717 port_per_socket[socket_id]++;
721 /* set flag to initialize port/queue */
722 port->need_reconfig = 1;
723 port->need_reconfig_queues = 1;
727 * Create mbuf pools.
728 * If NUMA support is disabled, create a single mbuf pool in
729 * socket 0 memory by default.
730 * Otherwise, create an mbuf pool in the memory of each socket in use.
732 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
733 * nb_txd can be configured at run time.
735 if (param_total_num_mbufs)
736 nb_mbuf_per_pool = param_total_num_mbufs;
738 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
739 (nb_lcores * mb_mempool_cache) +
740 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
741 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
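/*
 * Illustrative sizing, assuming the usual testpmd defaults of
 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048, MAX_PKT_BURST = 512
 * and mb_mempool_cache = 250 (these values are assumptions, see testpmd.h):
 * with 4 lcores the per-port share is 2048 + 4 * 250 + 2048 + 512 = 5608
 * mbufs, which is then multiplied by RTE_MAX_ETHPORTS.
 */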
747 for (i = 0; i < num_sockets; i++)
748 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
751 if (socket_num == UMA_NO_CONFIG)
752 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
754 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
760 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
761 DEV_TX_OFFLOAD_GRE_TNL_TSO;
763 * Record which mbuf pool each logical core should use, if needed.
765 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
766 mbp = mbuf_pool_find(
767 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
770 mbp = mbuf_pool_find(0);
771 fwd_lcores[lc_id]->mbp = mbp;
772 /* initialize GSO context */
773 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
774 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
775 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
776 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
778 fwd_lcores[lc_id]->gso_ctx.flag = 0;
781 /* Configuration of packet forwarding streams. */
782 if (init_fwd_streams() < 0)
783 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
787 /* create a gro context for each lcore */
788 gro_param.gro_types = RTE_GRO_TCP_IPV4;
789 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
790 gro_param.max_item_per_flow = MAX_PKT_BURST;
791 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
792 gro_param.socket_id = rte_lcore_to_socket_id(
793 fwd_lcores_cpuids[lc_id]);
794 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
795 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
796 rte_exit(EXIT_FAILURE,
797 "rte_gro_ctx_create() failed\n");
804 reconfig(portid_t new_port_id, unsigned socket_id)
806 struct rte_port *port;
808 /* Reconfiguration of Ethernet ports. */
809 port = &ports[new_port_id];
810 rte_eth_dev_info_get(new_port_id, &port->dev_info);
812 /* set flag to initialize port/queue */
813 port->need_reconfig = 1;
814 port->need_reconfig_queues = 1;
815 port->socket_id = socket_id;
822 init_fwd_streams(void)
825 struct rte_port *port;
826 streamid_t sm_id, nb_fwd_streams_new;
829 /* set socket id according to numa or not */
830 RTE_ETH_FOREACH_DEV(pid) {
832 if (nb_rxq > port->dev_info.max_rx_queues) {
833 printf("Fail: nb_rxq(%d) is greater than "
834 "max_rx_queues(%d)\n", nb_rxq,
835 port->dev_info.max_rx_queues);
838 if (nb_txq > port->dev_info.max_tx_queues) {
839 printf("Fail: nb_txq(%d) is greater than "
840 "max_tx_queues(%d)\n", nb_txq,
841 port->dev_info.max_tx_queues);
845 if (port_numa[pid] != NUMA_NO_CONFIG)
846 port->socket_id = port_numa[pid];
848 port->socket_id = rte_eth_dev_socket_id(pid);
850 /* if socket_id is invalid, set to 0 */
851 if (check_socket_id(port->socket_id) < 0)
856 if (socket_num == UMA_NO_CONFIG)
859 port->socket_id = socket_num;
863 q = RTE_MAX(nb_rxq, nb_txq);
865 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
868 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
869 if (nb_fwd_streams_new == nb_fwd_streams)
872 if (fwd_streams != NULL) {
873 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
874 if (fwd_streams[sm_id] == NULL)
876 rte_free(fwd_streams[sm_id]);
877 fwd_streams[sm_id] = NULL;
879 rte_free(fwd_streams);
884 nb_fwd_streams = nb_fwd_streams_new;
885 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
886 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
887 if (fwd_streams == NULL)
888 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
889 "failed\n", nb_fwd_streams);
891 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
892 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
893 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
894 if (fwd_streams[sm_id] == NULL)
895 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
902 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
904 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
906 unsigned int total_burst;
907 unsigned int nb_burst;
908 unsigned int burst_stats[3];
909 uint16_t pktnb_stats[3];
911 int burst_percent[3];
914 * First compute the total number of packet bursts and the
915 * two highest numbers of bursts of the same number of packets.
918 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
919 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
920 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
921 nb_burst = pbs->pkt_burst_spread[nb_pkt];
924 total_burst += nb_burst;
925 if (nb_burst > burst_stats[0]) {
926 burst_stats[1] = burst_stats[0];
927 pktnb_stats[1] = pktnb_stats[0];
928 burst_stats[0] = nb_burst;
929 pktnb_stats[0] = nb_pkt;
932 if (total_burst == 0)
934 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
935 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
936 burst_percent[0], (int) pktnb_stats[0]);
937 if (burst_stats[0] == total_burst) {
941 if (burst_stats[0] + burst_stats[1] == total_burst) {
942 printf(" + %d%% of %d pkts]\n",
943 100 - burst_percent[0], pktnb_stats[1]);
946 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
947 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
948 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
949 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
952 printf(" + %d%% of %d pkts + %d%% of others]\n",
953 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
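/*
 * Example output (illustrative only):
 *   RX-bursts : 1024 [87% of 32 pkts + 9% of 16 pkts + 4% of others]
 */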
955 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
958 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
960 struct rte_port *port;
963 static const char *fwd_stats_border = "----------------------";
965 port = &ports[port_id];
966 printf("\n %s Forward statistics for port %-2d %s\n",
967 fwd_stats_border, port_id, fwd_stats_border);
969 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
970 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
972 stats->ipackets, stats->imissed,
973 (uint64_t) (stats->ipackets + stats->imissed));
975 if (cur_fwd_eng == &csum_fwd_engine)
976 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
977 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
978 if ((stats->ierrors + stats->rx_nombuf) > 0) {
979 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
980 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
983 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
985 stats->opackets, port->tx_dropped,
986 (uint64_t) (stats->opackets + port->tx_dropped));
989 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
991 stats->ipackets, stats->imissed,
992 (uint64_t) (stats->ipackets + stats->imissed));
994 if (cur_fwd_eng == &csum_fwd_engine)
995 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
996 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
997 if ((stats->ierrors + stats->rx_nombuf) > 0) {
998 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
999 printf(" RX-nombufs: %14"PRIu64"\n",
1003 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
1005 stats->opackets, port->tx_dropped,
1006 (uint64_t) (stats->opackets + port->tx_dropped));
1009 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1010 if (port->rx_stream)
1011 pkt_burst_stats_display("RX",
1012 &port->rx_stream->rx_burst_stats);
1013 if (port->tx_stream)
1014 pkt_burst_stats_display("TX",
1015 &port->tx_stream->tx_burst_stats);
1018 if (port->rx_queue_stats_mapping_enabled) {
1020 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1021 printf(" Stats reg %2d RX-packets:%14"PRIu64
1022 " RX-errors:%14"PRIu64
1023 " RX-bytes:%14"PRIu64"\n",
1024 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1028 if (port->tx_queue_stats_mapping_enabled) {
1029 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1030 printf(" Stats reg %2d TX-packets:%14"PRIu64
1031 " TX-bytes:%14"PRIu64"\n",
1032 i, stats->q_opackets[i], stats->q_obytes[i]);
1036 printf(" %s--------------------------------%s\n",
1037 fwd_stats_border, fwd_stats_border);
1041 fwd_stream_stats_display(streamid_t stream_id)
1043 struct fwd_stream *fs;
1044 static const char *fwd_top_stats_border = "-------";
1046 fs = fwd_streams[stream_id];
1047 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1048 (fs->fwd_dropped == 0))
1050 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1051 "TX Port=%2d/Queue=%2d %s\n",
1052 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1053 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1054 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1055 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1057 /* if checksum mode */
1058 if (cur_fwd_eng == &csum_fwd_engine) {
1059 printf(" RX- bad IP checksum: %-14u RX- bad L4 checksum: "
1060 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1063 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1064 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1065 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1070 flush_fwd_rx_queues(void)
1072 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1079 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1080 uint64_t timer_period;
1082 /* convert to number of cycles */
1083 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1085 for (j = 0; j < 2; j++) {
1086 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1087 for (rxq = 0; rxq < nb_rxq; rxq++) {
1088 port_id = fwd_ports_ids[rxp];
1090 * testpmd can get stuck in the do/while loop below
1091 * if rte_eth_rx_burst() always returns nonzero
1092 * packets, so a timer is added to exit this loop
1093 * after the 1-second timer expires.
1095 prev_tsc = rte_rdtsc();
1097 nb_rx = rte_eth_rx_burst(port_id, rxq,
1098 pkts_burst, MAX_PKT_BURST);
1099 for (i = 0; i < nb_rx; i++)
1100 rte_pktmbuf_free(pkts_burst[i]);
1102 cur_tsc = rte_rdtsc();
1103 diff_tsc = cur_tsc - prev_tsc;
1104 timer_tsc += diff_tsc;
1105 } while ((nb_rx > 0) &&
1106 (timer_tsc < timer_period));
1110 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1115 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1117 struct fwd_stream **fsm;
1120 #ifdef RTE_LIBRTE_BITRATE
1121 uint64_t tics_per_1sec;
1122 uint64_t tics_datum;
1123 uint64_t tics_current;
1126 tics_datum = rte_rdtsc();
1127 tics_per_1sec = rte_get_timer_hz();
1129 fsm = &fwd_streams[fc->stream_idx];
1130 nb_fs = fc->stream_nb;
1132 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1133 (*pkt_fwd)(fsm[sm_id]);
1134 #ifdef RTE_LIBRTE_BITRATE
1135 if (bitrate_enabled != 0 &&
1136 bitrate_lcore_id == rte_lcore_id()) {
1137 tics_current = rte_rdtsc();
1138 if (tics_current - tics_datum >= tics_per_1sec) {
1139 /* Periodic bitrate calculation */
1140 RTE_ETH_FOREACH_DEV(idx_port)
1141 rte_stats_bitrate_calc(bitrate_data,
1143 tics_datum = tics_current;
1147 #ifdef RTE_LIBRTE_LATENCY_STATS
1148 if (latencystats_enabled != 0 &&
1149 latencystats_lcore_id == rte_lcore_id())
1150 rte_latencystats_update();
1153 } while (!fc->stopped);
1157 start_pkt_forward_on_core(void *fwd_arg)
1159 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1160 cur_fwd_config.fwd_eng->packet_fwd);
1165 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1166 * Used to start communication flows in network loopback test configurations.
1169 run_one_txonly_burst_on_core(void *fwd_arg)
1171 struct fwd_lcore *fwd_lc;
1172 struct fwd_lcore tmp_lcore;
1174 fwd_lc = (struct fwd_lcore *) fwd_arg;
1175 tmp_lcore = *fwd_lc;
1176 tmp_lcore.stopped = 1;
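/*
 * Pre-setting stopped = 1 on the copied context makes the do/while loop
 * in run_pkt_fwd_on_lcore() execute exactly once, so a single burst is
 * sent without touching the real lcore state.
 */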
1177 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1182 * Launch packet forwarding:
1183 * - Setup per-port forwarding context.
1184 * - launch logical cores with their forwarding configuration.
1187 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1189 port_fwd_begin_t port_fwd_begin;
1194 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1195 if (port_fwd_begin != NULL) {
1196 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1197 (*port_fwd_begin)(fwd_ports_ids[i]);
1199 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1200 lc_id = fwd_lcores_cpuids[i];
1201 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1202 fwd_lcores[i]->stopped = 0;
1203 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1204 fwd_lcores[i], lc_id);
1206 printf("launch lcore %u failed - diag=%d\n",
1213 * Launch packet forwarding configuration.
1216 start_packet_forwarding(int with_tx_first)
1218 port_fwd_begin_t port_fwd_begin;
1219 port_fwd_end_t port_fwd_end;
1220 struct rte_port *port;
1225 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1226 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1228 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1229 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1231 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1232 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1233 (!nb_rxq || !nb_txq))
1234 rte_exit(EXIT_FAILURE,
1235 "Either rxq or txq is 0, cannot use %s fwd mode\n",
1236 cur_fwd_eng->fwd_mode_name);
1238 if (all_ports_started() == 0) {
1239 printf("Not all ports were started\n");
1242 if (test_done == 0) {
1243 printf("Packet forwarding already started\n");
1247 if (init_fwd_streams() < 0) {
1248 printf("Fail from init_fwd_streams()\n");
1253 for (i = 0; i < nb_fwd_ports; i++) {
1254 pt_id = fwd_ports_ids[i];
1255 port = &ports[pt_id];
1256 if (!port->dcb_flag) {
1257 printf("In DCB mode, all forwarding ports must "
1258 "be configured in this mode.\n");
1262 if (nb_fwd_lcores == 1) {
1263 printf("In DCB mode, the number of forwarding "
1264 "cores should be larger than 1.\n");
1271 flush_fwd_rx_queues();
1274 pkt_fwd_config_display(&cur_fwd_config);
1275 rxtx_config_display();
1277 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1278 pt_id = fwd_ports_ids[i];
1279 port = &ports[pt_id];
1280 rte_eth_stats_get(pt_id, &port->stats);
1281 port->tx_dropped = 0;
1283 map_port_queue_stats_mapping_registers(pt_id, port);
1285 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1286 fwd_streams[sm_id]->rx_packets = 0;
1287 fwd_streams[sm_id]->tx_packets = 0;
1288 fwd_streams[sm_id]->fwd_dropped = 0;
1289 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1290 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1292 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1293 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1294 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1295 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1296 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1298 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1299 fwd_streams[sm_id]->core_cycles = 0;
1302 if (with_tx_first) {
1303 port_fwd_begin = tx_only_engine.port_fwd_begin;
1304 if (port_fwd_begin != NULL) {
1305 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1306 (*port_fwd_begin)(fwd_ports_ids[i]);
1308 while (with_tx_first--) {
1309 launch_packet_forwarding(
1310 run_one_txonly_burst_on_core);
1311 rte_eal_mp_wait_lcore();
1313 port_fwd_end = tx_only_engine.port_fwd_end;
1314 if (port_fwd_end != NULL) {
1315 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1316 (*port_fwd_end)(fwd_ports_ids[i]);
1319 launch_packet_forwarding(start_pkt_forward_on_core);
1323 stop_packet_forwarding(void)
1325 struct rte_eth_stats stats;
1326 struct rte_port *port;
1327 port_fwd_end_t port_fwd_end;
1332 uint64_t total_recv;
1333 uint64_t total_xmit;
1334 uint64_t total_rx_dropped;
1335 uint64_t total_tx_dropped;
1336 uint64_t total_rx_nombuf;
1337 uint64_t tx_dropped;
1338 uint64_t rx_bad_ip_csum;
1339 uint64_t rx_bad_l4_csum;
1340 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1341 uint64_t fwd_cycles;
1344 static const char *acc_stats_border = "+++++++++++++++";
1347 printf("Packet forwarding not started\n");
1350 printf("Telling cores to stop...");
1351 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1352 fwd_lcores[lc_id]->stopped = 1;
1353 printf("\nWaiting for lcores to finish...\n");
1354 rte_eal_mp_wait_lcore();
1355 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1356 if (port_fwd_end != NULL) {
1357 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1358 pt_id = fwd_ports_ids[i];
1359 (*port_fwd_end)(pt_id);
1362 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1365 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1366 if (cur_fwd_config.nb_fwd_streams >
1367 cur_fwd_config.nb_fwd_ports) {
1368 fwd_stream_stats_display(sm_id);
1369 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1370 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1372 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1374 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1377 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1378 tx_dropped = (uint64_t) (tx_dropped +
1379 fwd_streams[sm_id]->fwd_dropped);
1380 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1383 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1384 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1385 fwd_streams[sm_id]->rx_bad_ip_csum);
1386 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1390 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1391 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1392 fwd_streams[sm_id]->rx_bad_l4_csum);
1393 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1396 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1397 fwd_cycles = (uint64_t) (fwd_cycles +
1398 fwd_streams[sm_id]->core_cycles);
1403 total_rx_dropped = 0;
1404 total_tx_dropped = 0;
1405 total_rx_nombuf = 0;
1406 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1407 pt_id = fwd_ports_ids[i];
1409 port = &ports[pt_id];
1410 rte_eth_stats_get(pt_id, &stats);
1411 stats.ipackets -= port->stats.ipackets;
1412 port->stats.ipackets = 0;
1413 stats.opackets -= port->stats.opackets;
1414 port->stats.opackets = 0;
1415 stats.ibytes -= port->stats.ibytes;
1416 port->stats.ibytes = 0;
1417 stats.obytes -= port->stats.obytes;
1418 port->stats.obytes = 0;
1419 stats.imissed -= port->stats.imissed;
1420 port->stats.imissed = 0;
1421 stats.oerrors -= port->stats.oerrors;
1422 port->stats.oerrors = 0;
1423 stats.rx_nombuf -= port->stats.rx_nombuf;
1424 port->stats.rx_nombuf = 0;
1426 total_recv += stats.ipackets;
1427 total_xmit += stats.opackets;
1428 total_rx_dropped += stats.imissed;
1429 total_tx_dropped += port->tx_dropped;
1430 total_rx_nombuf += stats.rx_nombuf;
1432 fwd_port_stats_display(pt_id, &stats);
1435 printf("\n %s Accumulated forward statistics for all ports"
1437 acc_stats_border, acc_stats_border);
1438 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1440 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1442 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1443 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1444 if (total_rx_nombuf > 0)
1445 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1446 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1448 acc_stats_border, acc_stats_border);
1449 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1451 printf("\n CPU cycles/packet=%u (total cycles="
1452 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1453 (unsigned int)(fwd_cycles / total_recv),
1454 fwd_cycles, total_recv);
1456 printf("\nDone.\n");
1461 dev_set_link_up(portid_t pid)
1463 if (rte_eth_dev_set_link_up(pid) < 0)
1464 printf("\nSet link up failed.\n");
1468 dev_set_link_down(portid_t pid)
1470 if (rte_eth_dev_set_link_down(pid) < 0)
1471 printf("\nSet link down failed.\n");
1475 all_ports_started(void)
1478 struct rte_port *port;
1480 RTE_ETH_FOREACH_DEV(pi) {
1482 /* Check if there is a port which is not started */
1483 if ((port->port_status != RTE_PORT_STARTED) &&
1484 (port->slave_flag == 0))
1488 /* No port is not started */
1493 port_is_stopped(portid_t port_id)
1495 struct rte_port *port = &ports[port_id];
1497 if ((port->port_status != RTE_PORT_STOPPED) &&
1498 (port->slave_flag == 0))
1504 all_ports_stopped(void)
1508 RTE_ETH_FOREACH_DEV(pi) {
1509 if (!port_is_stopped(pi))
1517 port_is_started(portid_t port_id)
1519 if (port_id_is_invalid(port_id, ENABLED_WARN))
1522 if (ports[port_id].port_status != RTE_PORT_STARTED)
1529 port_is_closed(portid_t port_id)
1531 if (port_id_is_invalid(port_id, ENABLED_WARN))
1534 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1541 start_port(portid_t pid)
1543 int diag, need_check_link_status = -1;
1546 struct rte_port *port;
1547 struct ether_addr mac_addr;
1548 enum rte_eth_event_type event_type;
1550 if (port_id_is_invalid(pid, ENABLED_WARN))
1555 RTE_ETH_FOREACH_DEV(pi) {
1556 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1559 need_check_link_status = 0;
1561 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1562 RTE_PORT_HANDLING) == 0) {
1563 printf("Port %d is not stopped\n", pi);
1567 if (port->need_reconfig > 0) {
1568 port->need_reconfig = 0;
1570 if (flow_isolate_all) {
1571 int ret = port_flow_isolate(pi, 1);
1573 printf("Failed to apply isolated"
1574 " mode on port %d\n", pi);
1579 printf("Configuring Port %d (socket %u)\n", pi,
1581 /* configure port */
1582 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1585 if (rte_atomic16_cmpset(&(port->port_status),
1586 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1587 printf("Port %d can not be set back "
1588 "to stopped\n", pi);
1589 printf("Failed to configure port %d\n", pi);
1590 /* try to reconfigure port next time */
1591 port->need_reconfig = 1;
1595 if (port->need_reconfig_queues > 0) {
1596 port->need_reconfig_queues = 0;
1597 /* setup tx queues */
1598 for (qi = 0; qi < nb_txq; qi++) {
1599 port->tx_conf[qi].txq_flags =
1600 ETH_TXQ_FLAGS_IGNORE;
1601 /* Apply Tx offloads configuration */
1602 port->tx_conf[qi].offloads =
1603 port->dev_conf.txmode.offloads;
1604 if ((numa_support) &&
1605 (txring_numa[pi] != NUMA_NO_CONFIG))
1606 diag = rte_eth_tx_queue_setup(pi, qi,
1607 port->nb_tx_desc[qi],
1609 &(port->tx_conf[qi]));
1611 diag = rte_eth_tx_queue_setup(pi, qi,
1612 port->nb_tx_desc[qi],
1614 &(port->tx_conf[qi]));
1619 /* Failed to set up tx queue, return */
1620 if (rte_atomic16_cmpset(&(port->port_status),
1622 RTE_PORT_STOPPED) == 0)
1623 printf("Port %d can not be set back "
1624 "to stopped\n", pi);
1625 printf("Failed to configure port %d tx queues\n",
1627 /* try to reconfigure queues next time */
1628 port->need_reconfig_queues = 1;
1631 for (qi = 0; qi < nb_rxq; qi++) {
1632 /* Apply Rx offloads configuration */
1633 port->rx_conf[qi].offloads =
1634 port->dev_conf.rxmode.offloads;
1635 /* setup rx queues */
1636 if ((numa_support) &&
1637 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1638 struct rte_mempool *mp =
1639 mbuf_pool_find(rxring_numa[pi]);
1641 printf("Failed to setup RX queue: "
1642 "No mempool allocation"
1643 " on the socket %d\n",
1648 diag = rte_eth_rx_queue_setup(pi, qi,
1649 port->nb_rx_desc[pi],
1651 &(port->rx_conf[qi]),
1654 struct rte_mempool *mp =
1655 mbuf_pool_find(port->socket_id);
1657 printf("Failed to setup RX queue: "
1658 "No mempool allocation"
1659 " on the socket %d\n",
1663 diag = rte_eth_rx_queue_setup(pi, qi,
1664 port->nb_rx_desc[pi],
1666 &(port->rx_conf[qi]),
1672 /* Failed to set up rx queue, return */
1673 if (rte_atomic16_cmpset(&(port->port_status),
1675 RTE_PORT_STOPPED) == 0)
1676 printf("Port %d can not be set back "
1677 "to stopped\n", pi);
1678 printf("Failed to configure port %d rx queues\n",
1680 /* try to reconfigure queues next time */
1681 port->need_reconfig_queues = 1;
1687 if (rte_eth_dev_start(pi) < 0) {
1688 printf("Failed to start port %d\n", pi);
1690 /* Failed to start the port; roll back to stopped and return */
1691 if (rte_atomic16_cmpset(&(port->port_status),
1692 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1693 printf("Port %d can not be set back to "
1698 if (rte_atomic16_cmpset(&(port->port_status),
1699 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1700 printf("Port %d can not be set into started\n", pi);
1702 rte_eth_macaddr_get(pi, &mac_addr);
1703 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1704 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1705 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1706 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1708 /* at least one port started, need to check link status */
1709 need_check_link_status = 1;
1712 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1713 event_type < RTE_ETH_EVENT_MAX;
1715 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1720 printf("Failed to setup event callback for event %d\n",
1726 if (need_check_link_status == 1 && !no_link_check)
1727 check_all_ports_link_status(RTE_PORT_ALL);
1728 else if (need_check_link_status == 0)
1729 printf("Please stop the ports first\n");
1736 stop_port(portid_t pid)
1739 struct rte_port *port;
1740 int need_check_link_status = 0;
1747 if (port_id_is_invalid(pid, ENABLED_WARN))
1750 printf("Stopping ports...\n");
1752 RTE_ETH_FOREACH_DEV(pi) {
1753 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1756 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1757 printf("Please remove port %d from forwarding configuration.\n", pi);
1761 if (port_is_bonding_slave(pi)) {
1762 printf("Please remove port %d from bonded device.\n", pi);
1767 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1768 RTE_PORT_HANDLING) == 0)
1771 rte_eth_dev_stop(pi);
1773 if (rte_atomic16_cmpset(&(port->port_status),
1774 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1775 printf("Port %d can not be set into stopped\n", pi);
1776 need_check_link_status = 1;
1778 if (need_check_link_status && !no_link_check)
1779 check_all_ports_link_status(RTE_PORT_ALL);
1785 close_port(portid_t pid)
1788 struct rte_port *port;
1790 if (port_id_is_invalid(pid, ENABLED_WARN))
1793 printf("Closing ports...\n");
1795 RTE_ETH_FOREACH_DEV(pi) {
1796 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1799 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1800 printf("Please remove port %d from forwarding configuration.\n", pi);
1804 if (port_is_bonding_slave(pi)) {
1805 printf("Please remove port %d from bonded device.\n", pi);
1810 if (rte_atomic16_cmpset(&(port->port_status),
1811 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1812 printf("Port %d is already closed\n", pi);
1816 if (rte_atomic16_cmpset(&(port->port_status),
1817 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1818 printf("Port %d is not stopped\n", pi);
1822 if (port->flow_list)
1823 port_flow_flush(pi);
1824 rte_eth_dev_close(pi);
1826 if (rte_atomic16_cmpset(&(port->port_status),
1827 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1828 printf("Port %d cannot be set to closed\n", pi);
1835 reset_port(portid_t pid)
1839 struct rte_port *port;
1841 if (port_id_is_invalid(pid, ENABLED_WARN))
1844 printf("Resetting ports...\n");
1846 RTE_ETH_FOREACH_DEV(pi) {
1847 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1850 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1851 printf("Please remove port %d from forwarding "
1852 "configuration.\n", pi);
1856 if (port_is_bonding_slave(pi)) {
1857 printf("Please remove port %d from bonded device.\n",
1862 diag = rte_eth_dev_reset(pi);
1865 port->need_reconfig = 1;
1866 port->need_reconfig_queues = 1;
1868 printf("Failed to reset port %d. diag=%d\n", pi, diag);
1876 eth_dev_event_callback_register(void)
1880 /* register the device event callback */
1881 ret = rte_dev_event_callback_register(NULL,
1882 eth_dev_event_callback, NULL);
1884 printf("Failed to register device event callback\n");
1893 eth_dev_event_callback_unregister(void)
1897 /* unregister the device event callback */
1898 ret = rte_dev_event_callback_unregister(NULL,
1899 eth_dev_event_callback, NULL);
1901 printf("Failed to unregister device event callback\n");
1909 attach_port(char *identifier)
1912 unsigned int socket_id;
1914 printf("Attaching a new port...\n");
1916 if (identifier == NULL) {
1917 printf("Invalid parameters are specified\n");
1921 if (rte_eth_dev_attach(identifier, &pi))
1924 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1925 /* if socket_id is invalid, set to 0 */
1926 if (check_socket_id(socket_id) < 0)
1928 reconfig(pi, socket_id);
1929 rte_eth_promiscuous_enable(pi);
1931 nb_ports = rte_eth_dev_count_avail();
1933 ports[pi].port_status = RTE_PORT_STOPPED;
1935 printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1940 detach_port(portid_t port_id)
1942 char name[RTE_ETH_NAME_MAX_LEN];
1944 printf("Detaching a port...\n");
1946 if (!port_is_closed(port_id)) {
1947 printf("Please close port first\n");
1951 if (ports[port_id].flow_list)
1952 port_flow_flush(port_id);
1954 if (rte_eth_dev_detach(port_id, name)) {
1955 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1959 nb_ports = rte_eth_dev_count_avail();
1961 printf("Port '%s' is detached. Total number of ports is now %d\n",
1974 stop_packet_forwarding();
1976 if (ports != NULL) {
1978 RTE_ETH_FOREACH_DEV(pt_id) {
1979 printf("\nShutting down port %d...\n", pt_id);
1987 ret = rte_dev_event_monitor_stop();
1990 "failed to stop device event monitor.");
1992 ret = eth_dev_event_callback_unregister();
1995 "failed to unregister all event callbacks.");
1998 printf("\nBye...\n");
2001 typedef void (*cmd_func_t)(void);
2002 struct pmd_test_command {
2003 const char *cmd_name;
2004 cmd_func_t cmd_func;
2007 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2009 /* Check the link status of all ports for up to 9 s, and print the final status */
2011 check_all_ports_link_status(uint32_t port_mask)
2013 #define CHECK_INTERVAL 100 /* 100ms */
2014 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2016 uint8_t count, all_ports_up, print_flag = 0;
2017 struct rte_eth_link link;
2019 printf("Checking link statuses...\n");
2021 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2023 RTE_ETH_FOREACH_DEV(portid) {
2024 if ((port_mask & (1 << portid)) == 0)
2026 memset(&link, 0, sizeof(link));
2027 rte_eth_link_get_nowait(portid, &link);
2028 /* print link status if flag set */
2029 if (print_flag == 1) {
2030 if (link.link_status)
2032 "Port %d Link Up. speed %u Mbps - %s\n",
2033 portid, link.link_speed,
2034 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2035 ("full-duplex") : ("half-duplex"));
2037 printf("Port %d Link Down\n", portid);
2040 /* clear all_ports_up flag if any link down */
2041 if (link.link_status == ETH_LINK_DOWN) {
2046 /* after finally printing all link status, get out */
2047 if (print_flag == 1)
2050 if (all_ports_up == 0) {
2052 rte_delay_ms(CHECK_INTERVAL);
2055 /* set the print_flag if all ports up or timeout */
2056 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2066 rmv_event_callback(void *arg)
2068 struct rte_eth_dev *dev;
2069 portid_t port_id = (intptr_t)arg;
2071 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2072 dev = &rte_eth_devices[port_id];
2075 close_port(port_id);
2076 printf("removing device %s\n", dev->device->name);
2077 if (rte_eal_dev_detach(dev->device))
2078 TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2082 /* This function is used by the interrupt thread */
2084 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2087 static const char * const event_desc[] = {
2088 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2089 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2090 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2091 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2092 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2093 [RTE_ETH_EVENT_IPSEC] = "IPsec",
2094 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2095 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2096 [RTE_ETH_EVENT_NEW] = "device probed",
2097 [RTE_ETH_EVENT_DESTROY] = "device released",
2098 [RTE_ETH_EVENT_MAX] = NULL,
2101 RTE_SET_USED(param);
2102 RTE_SET_USED(ret_param);
2104 if (type >= RTE_ETH_EVENT_MAX) {
2105 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
2106 port_id, __func__, type);
2108 } else if (event_print_mask & (UINT32_C(1) << type)) {
2109 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2114 if (port_id_is_invalid(port_id, DISABLED_WARN))
2118 case RTE_ETH_EVENT_INTR_RMV:
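/*
 * The removal is deferred through an alarm rather than handled here:
 * this callback runs in the interrupt thread, where closing and
 * detaching the device directly would not be safe.
 */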
2119 if (rte_eal_alarm_set(100000,
2120 rmv_event_callback, (void *)(intptr_t)port_id))
2121 fprintf(stderr, "Could not set up deferred device removal\n");
2129 /* This function is used by the interrupt thread */
2131 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2132 __rte_unused void *arg)
2134 if (type >= RTE_DEV_EVENT_MAX) {
2135 fprintf(stderr, "%s called upon invalid event %d\n",
2141 case RTE_DEV_EVENT_REMOVE:
2142 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2144 /* TODO: After failure handling finishes, stop packet
2145 * forwarding, then stop, close and detach the port.
2148 case RTE_DEV_EVENT_ADD:
2149 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2151 /* TODO: After the kernel driver binding finishes,
2152 * attach the port.
2161 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2165 uint8_t mapping_found = 0;
2167 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2168 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2169 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2170 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2171 tx_queue_stats_mappings[i].queue_id,
2172 tx_queue_stats_mappings[i].stats_counter_id);
2179 port->tx_queue_stats_mapping_enabled = 1;
2184 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2188 uint8_t mapping_found = 0;
2190 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2191 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2192 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2193 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2194 rx_queue_stats_mappings[i].queue_id,
2195 rx_queue_stats_mappings[i].stats_counter_id);
2202 port->rx_queue_stats_mapping_enabled = 1;
2207 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2211 diag = set_tx_queue_stats_mapping_registers(pi, port);
2213 if (diag == -ENOTSUP) {
2214 port->tx_queue_stats_mapping_enabled = 0;
2215 printf("TX queue stats mapping not supported for port id=%d\n", pi);
2218 rte_exit(EXIT_FAILURE,
2219 "set_tx_queue_stats_mapping_registers "
2220 "failed for port id=%d diag=%d\n",
2224 diag = set_rx_queue_stats_mapping_registers(pi, port);
2226 if (diag == -ENOTSUP) {
2227 port->rx_queue_stats_mapping_enabled = 0;
2228 printf("RX queue stats mapping not supported for port id=%d\n", pi);
2231 rte_exit(EXIT_FAILURE,
2232 "set_rx_queue_stats_mapping_registers "
2233 "failed for port id=%d diag=%d\n",
2239 rxtx_port_config(struct rte_port *port)
2243 for (qid = 0; qid < nb_rxq; qid++) {
2244 port->rx_conf[qid] = port->dev_info.default_rxconf;
2246 /* Check if any Rx parameters have been passed */
2247 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2248 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2250 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2251 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2253 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2254 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2256 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2257 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2259 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2260 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2262 port->nb_rx_desc[qid] = nb_rxd;
2265 for (qid = 0; qid < nb_txq; qid++) {
2266 port->tx_conf[qid] = port->dev_info.default_txconf;
2268 /* Check if any Tx parameters have been passed */
2269 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2270 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2272 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2273 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2275 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2276 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2278 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2279 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2281 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2282 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2284 port->nb_tx_desc[qid] = nb_txd;
2289 init_port_config(void)
2292 struct rte_port *port;
2294 RTE_ETH_FOREACH_DEV(pid) {
2296 port->dev_conf.fdir_conf = fdir_conf;
2298 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2299 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2301 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2302 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2305 if (port->dcb_flag == 0) {
2306 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2307 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2309 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2312 rxtx_port_config(port);
2314 rte_eth_macaddr_get(pid, &port->eth_addr);
2316 map_port_queue_stats_mapping_registers(pid, port);
2317 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2318 rte_pmd_ixgbe_bypass_init(pid);
2321 if (lsc_interrupt &&
2322 (rte_eth_devices[pid].data->dev_flags &
2323 RTE_ETH_DEV_INTR_LSC))
2324 port->dev_conf.intr_conf.lsc = 1;
2325 if (rmv_interrupt &&
2326 (rte_eth_devices[pid].data->dev_flags &
2327 RTE_ETH_DEV_INTR_RMV))
2328 port->dev_conf.intr_conf.rmv = 1;
2330 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2331 /* Detect softnic port */
2332 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2333 port->softnic_enable = 1;
2334 memset(&port->softport, 0, sizeof(struct softnic_port));
2336 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2337 port->softport.tm_flag = 1;
2343 void set_port_slave_flag(portid_t slave_pid)
2345 struct rte_port *port;
2347 port = &ports[slave_pid];
2348 port->slave_flag = 1;
2351 void clear_port_slave_flag(portid_t slave_pid)
2353 struct rte_port *port;
2355 port = &ports[slave_pid];
2356 port->slave_flag = 0;
2359 uint8_t port_is_bonding_slave(portid_t slave_pid)
2361 struct rte_port *port;
2363 port = &ports[slave_pid];
2364 return port->slave_flag;
2367 const uint16_t vlan_tags[] = {
2368 0, 1, 2, 3, 4, 5, 6, 7,
2369 8, 9, 10, 11, 12, 13, 14, 15,
2370 16, 17, 18, 19, 20, 21, 22, 23,
2371 24, 25, 26, 27, 28, 29, 30, 31
2375 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2376 enum dcb_mode_enable dcb_mode,
2377 enum rte_eth_nb_tcs num_tcs,
2383 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2384 * given above, and the number of traffic classes available for use.
2386 if (dcb_mode == DCB_VT_ENABLED) {
2387 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2388 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2389 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2390 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2392 /* VMDQ+DCB RX and TX configurations */
2393 vmdq_rx_conf->enable_default_pool = 0;
2394 vmdq_rx_conf->default_pool = 0;
2395 vmdq_rx_conf->nb_queue_pools =
2396 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2397 vmdq_tx_conf->nb_queue_pools =
2398 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2400 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2401 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2402 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2403 vmdq_rx_conf->pool_map[i].pools =
2404 1 << (i % vmdq_rx_conf->nb_queue_pools);
2406 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2407 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2408 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2411 /* set DCB mode of RX and TX of multiple queues */
2412 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2413 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
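/*
 * Illustrative mapping (not from the original source): with
 * num_tcs == ETH_4_TCS, 32 VMDq pools are configured (one per entry of
 * vlan_tags[]) and user priorities 0..7 map to traffic classes
 * 0, 1, 2, 3, 0, 1, 2, 3.
 */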
2415 struct rte_eth_dcb_rx_conf *rx_conf =
2416 ð_conf->rx_adv_conf.dcb_rx_conf;
2417 struct rte_eth_dcb_tx_conf *tx_conf =
2418 ð_conf->tx_adv_conf.dcb_tx_conf;
2420 rx_conf->nb_tcs = num_tcs;
2421 tx_conf->nb_tcs = num_tcs;
2423 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2424 rx_conf->dcb_tc[i] = i % num_tcs;
2425 tx_conf->dcb_tc[i] = i % num_tcs;
2427 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2428 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2429 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2433 eth_conf->dcb_capability_en =
2434 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2436 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2442 init_port_dcb_config(portid_t pid,
2443 enum dcb_mode_enable dcb_mode,
2444 enum rte_eth_nb_tcs num_tcs,
2447 struct rte_eth_conf port_conf;
2448 struct rte_port *rte_port;
2452 rte_port = &ports[pid];
2454 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2455 /* Enter DCB configuration status */
2458 port_conf.rxmode = rte_port->dev_conf.rxmode;
2459 port_conf.txmode = rte_port->dev_conf.txmode;
2461 /* set configuration of DCB in VT mode and DCB in non-VT mode */
2462 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2465 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2468 * Write the configuration into the device.
2469 * Set the numbers of RX & TX queues to 0, so
2470 * the RX & TX queues will not be set up.
2472 rte_eth_dev_configure(pid, 0, 0, &port_conf);
2474 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2476 /* If dev_info.vmdq_pool_base is greater than 0,
2477 * the queue ids of the vmdq pools start after the PF queues.
2479 if (dcb_mode == DCB_VT_ENABLED &&
2480 rte_port->dev_info.vmdq_pool_base > 0) {
2481 printf("VMDQ_DCB multi-queue mode is nonsensical"
2482 " for port %d.\n", pid);
2486 /* Assume the ports in testpmd have the same DCB capability
2487 * and have the same number of rxq and txq in DCB mode
2489 if (dcb_mode == DCB_VT_ENABLED) {
2490 if (rte_port->dev_info.max_vfs > 0) {
2491 nb_rxq = rte_port->dev_info.nb_rx_queues;
2492 nb_txq = rte_port->dev_info.nb_tx_queues;
2494 nb_rxq = rte_port->dev_info.max_rx_queues;
2495 nb_txq = rte_port->dev_info.max_tx_queues;
2498 /* if VT is disabled, use all PF queues */
2499 if (rte_port->dev_info.vmdq_pool_base == 0) {
2500 nb_rxq = rte_port->dev_info.max_rx_queues;
2501 nb_txq = rte_port->dev_info.max_tx_queues;
2503 nb_rxq = (queueid_t)num_tcs;
2504 nb_txq = (queueid_t)num_tcs;
2508 rx_free_thresh = 64;
2510 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2512 rxtx_port_config(rte_port);
2514 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2515 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2516 rx_vft_set(pid, vlan_tags[i], 1);
2518 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2519 map_port_queue_stats_mapping_registers(pid, rte_port);
2521 rte_port->dcb_flag = 1;
2529 /* Configuration of Ethernet ports. */
2530 ports = rte_zmalloc("testpmd: ports",
2531 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2532 RTE_CACHE_LINE_SIZE);
2533 if (ports == NULL) {
2534 rte_exit(EXIT_FAILURE,
2535 "rte_zmalloc(%d struct rte_port) failed\n",
2551 const char clr[] = { 27, '[', '2', 'J', '\0' };
2552 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
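/*
 * These are raw ANSI escape sequences: 27 is the ESC byte, so clr is
 * ESC "[2J" (erase display) and top_left is ESC "[1;1H" (move the
 * cursor to row 1, column 1).
 */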
2554 /* Clear screen and move to top left */
2555 printf("%s%s", clr, top_left);
2557 printf("\nPort statistics ====================================");
2558 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2559 nic_stats_display(fwd_ports_ids[i]);
2563 signal_handler(int signum)
2565 if (signum == SIGINT || signum == SIGTERM) {
2566 printf("\nSignal %d received, preparing to exit...\n",
2568 #ifdef RTE_LIBRTE_PDUMP
2569 /* uninitialize packet capture framework */
2572 #ifdef RTE_LIBRTE_LATENCY_STATS
2573 rte_latencystats_uninit();
2576 /* Set flag to indicate forced termination. */
2578 /* exit with the expected status */
2579 signal(signum, SIG_DFL);
2580 kill(getpid(), signum);
2585 main(int argc, char** argv)
2591 signal(SIGINT, signal_handler);
2592 signal(SIGTERM, signal_handler);
2594 diag = rte_eal_init(argc, argv);
2596 rte_panic("Cannot init EAL\n");
2598 testpmd_logtype = rte_log_register("testpmd");
2599 if (testpmd_logtype < 0)
2600 rte_panic("Cannot register log type");
2601 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2603 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2604 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2608 #ifdef RTE_LIBRTE_PDUMP
2609 /* initialize packet capture framework */
2610 rte_pdump_init(NULL);
2613 nb_ports = (portid_t) rte_eth_dev_count_avail();
2615 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2617 /* allocate port structures, and init them */
2620 set_def_fwd_config();
2622 rte_panic("Empty set of forwarding logical cores - check the "
2623 "core mask supplied in the command parameters\n");
2625 /* Bitrate/latency stats disabled by default */
2626 #ifdef RTE_LIBRTE_BITRATE
2627 bitrate_enabled = 0;
2629 #ifdef RTE_LIBRTE_LATENCY_STATS
2630 latencystats_enabled = 0;
2636 launch_args_parse(argc, argv);
2638 if (tx_first && interactive)
2639 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2640 "interactive mode.\n");
2642 if (tx_first && lsc_interrupt) {
2643 printf("Warning: lsc_interrupt needs to be off when "
2644 "using tx_first. Disabling.\n");
2648 if (!nb_rxq && !nb_txq)
2649 printf("Warning: Either rx or tx queues should be non-zero\n");
2651 if (nb_rxq > 1 && nb_rxq > nb_txq)
2652 printf("Warning: nb_rxq=%d enables RSS configuration, "
2653 "but nb_txq=%d will prevent it from being fully tested.\n",
2659 /* enable hot plug monitoring */
2660 ret = rte_dev_event_monitor_start();
2665 eth_dev_event_callback_register();
2669 if (start_port(RTE_PORT_ALL) != 0)
2670 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2672 /* set all ports to promiscuous mode by default */
2673 RTE_ETH_FOREACH_DEV(port_id)
2674 rte_eth_promiscuous_enable(port_id);
2676 /* Init metrics library */
2677 rte_metrics_init(rte_socket_id());
2679 #ifdef RTE_LIBRTE_LATENCY_STATS
2680 if (latencystats_enabled != 0) {
2681 int ret = rte_latencystats_init(1, NULL);
2683 printf("Warning: latencystats init()"
2684 " returned error %d\n", ret);
2685 printf("Latencystats running on lcore %d\n",
2686 latencystats_lcore_id);
2690 /* Setup bitrate stats */
2691 #ifdef RTE_LIBRTE_BITRATE
2692 if (bitrate_enabled != 0) {
2693 bitrate_data = rte_stats_bitrate_create();
2694 if (bitrate_data == NULL)
2695 rte_exit(EXIT_FAILURE,
2696 "Could not allocate bitrate data.\n");
2697 rte_stats_bitrate_reg(bitrate_data);
2701 #ifdef RTE_LIBRTE_CMDLINE
2702 if (strlen(cmdline_filename) != 0)
2703 cmdline_read_from_file(cmdline_filename);
2705 if (interactive == 1) {
2707 printf("Start automatic packet forwarding\n");
2708 start_packet_forwarding(0);
2720 printf("No command-line core given, starting packet forwarding\n");
2721 start_packet_forwarding(tx_first);
2722 if (stats_period != 0) {
2723 uint64_t prev_time = 0, cur_time, diff_time = 0;
2724 uint64_t timer_period;
2726 /* Convert to number of cycles */
2727 timer_period = stats_period * rte_get_timer_hz();
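/*
 * For example, with --stats-period 5 on a machine whose timer runs at
 * 2.0 GHz, timer_period becomes 10^10 cycles between statistics
 * displays (an illustrative figure, not from the original source).
 */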
2729 while (f_quit == 0) {
2730 cur_time = rte_get_timer_cycles();
2731 diff_time += cur_time - prev_time;
2733 if (diff_time >= timer_period) {
2735 /* Reset the timer */
2738 /* Sleep to avoid unnecessary checks */
2739 prev_time = cur_time;
2744 printf("Press enter to exit\n");
2745 rc = read(0, &c, 1);