1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
66 uint16_t verbose_level = 0; /**< Silent by default. */
67 int testpmd_logtype; /**< Log type for testpmd logs */
69 /* use master core for command line? */
70 uint8_t interactive = 0;
71 uint8_t auto_start = 0;
73 char cmdline_filename[PATH_MAX] = {0};
76 * NUMA support configuration.
77 * When set, the NUMA support attempts to dispatch the allocation of the
78 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
79 * probed ports among the CPU sockets 0 and 1.
80 * Otherwise, all memory is allocated from CPU socket 0.
82 uint8_t numa_support = 1; /**< numa enabled by default */
85 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
88 uint8_t socket_num = UMA_NO_CONFIG;
91 * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
96 * Store the specified socket on which the memory pool used by each port is allocated
99 uint8_t port_numa[RTE_MAX_ETHPORTS];
102 * Store the specified socket on which the RX ring of each port is allocated
105 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
108 * Store the specified socket on which the TX ring of each port is allocated
111 uint8_t txring_numa[RTE_MAX_ETHPORTS];
114 * Record the Ethernet addresses of peer target ports to which packets are sent.
116 * Must be instantiated with the Ethernet addresses of peer traffic generator ports.
119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120 portid_t nb_peer_eth_addrs = 0;
123 * Probed Target Environment.
125 struct rte_port *ports; /**< For all probed ethernet ports. */
126 portid_t nb_ports; /**< Number of probed ethernet ports. */
127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
131 * Test Forwarding Configuration.
132 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
133 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
137 portid_t nb_cfg_ports; /**< Number of configured ports. */
138 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
143 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
144 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * max(nb_rxq, nb_txq)). */
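/*
 * For example, with nb_ports = 2, nb_rxq = 4 and nb_txq = 2,
 * init_fwd_streams() below allocates 2 * max(4, 2) = 8 streams.
 */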
147 * Forwarding engines.
149 struct fwd_engine *fwd_engines[] = {
158 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
160 &softnic_tm_bypass_engine,
162 #ifdef RTE_LIBRTE_IEEE1588
163 &ieee1588_fwd_engine,
168 struct fwd_config cur_fwd_config;
169 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
170 uint32_t retry_enabled;
171 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
172 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
174 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
175 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
176 * specified on command-line. */
177 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
180 * In a container, a process running with the 'stats-period' option cannot otherwise
181 * be terminated. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
186 * Configuration of packet segments used by the "txonly" processing engine.
188 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
189 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
190 TXONLY_DEF_PACKET_LEN,
192 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
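/*
 * For example, running testpmd with "--txpkts=64,64" gives
 * tx_pkt_nb_segs == 2, two 64-byte segment lengths and
 * tx_pkt_length == 128.
 */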
194 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
195 /**< Split policy for packets to TX. */
197 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
198 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
200 /* whether the current configuration is in DCB mode; 0 means it is not */
201 uint8_t dcb_config = 0;
203 /* Whether the dcb is in testing status */
204 uint8_t dcb_test = 0;
207 * Configurable number of RX/TX queues.
209 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
210 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
213 * Configurable number of RX/TX ring descriptors.
214 * Defaults are supplied by drivers via ethdev.
216 #define RTE_TEST_RX_DESC_DEFAULT 0
217 #define RTE_TEST_TX_DESC_DEFAULT 0
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
221 #define RTE_PMD_PARAM_UNSET -1
223 * Configurable values of RX and TX ring threshold registers.
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
235 * Configurable value of RX free threshold.
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
240 * Configurable value of RX drop enable.
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
245 * Configurable value of TX free threshold.
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
250 * Configurable value of TX RS bit threshold.
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
255 * Receive Side Scaling (RSS) configuration.
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
260 * Port topology configuration
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
265 * Avoid flushing all the RX streams before forwarding starts.
267 uint8_t no_flush_rx = 0; /* flush by default */
270 * Flow API isolated mode.
272 uint8_t flow_isolate_all;
275 * Avoid checking link status when starting/stopping a port.
277 uint8_t no_link_check = 0; /* check by default */
280 * Enable link status change notification
282 uint8_t lsc_interrupt = 1; /* enabled by default */
285 * Enable device removal notification.
287 uint8_t rmv_interrupt = 1; /* enabled by default */
289 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
292 * Display or mask Ethernet device events.
293 * Defaults to all events except VF_MBOX.
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
300 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
301 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
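/*
 * Individual bits can be cleared to silence one event type; e.g. the
 * statement below (illustrative only, not part of the defaults) would
 * mask LSC events:
 *
 *	event_print_mask &= ~(UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC);
 */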
303 * Decide if all memory is locked for performance.
308 * NIC bypass mode configuration options.
311 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
312 /* The NIC bypass watchdog timeout. */
313 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
317 #ifdef RTE_LIBRTE_LATENCY_STATS
320 * Set when latency stats are enabled on the command line
322 uint8_t latencystats_enabled;
325 * Lcore ID to service latency statistics.
327 lcoreid_t latencystats_lcore_id = -1;
332 * Ethernet device configuration.
334 struct rte_eth_rxmode rx_mode = {
335 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
336 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
339 struct rte_eth_txmode tx_mode = {
340 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
343 struct rte_fdir_conf fdir_conf = {
344 .mode = RTE_FDIR_MODE_NONE,
345 .pballoc = RTE_FDIR_PBALLOC_64K,
346 .status = RTE_FDIR_REPORT_STATUS,
348 .vlan_tci_mask = 0xFFEF,
350 .src_ip = 0xFFFFFFFF,
351 .dst_ip = 0xFFFFFFFF,
354 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
355 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
357 .src_port_mask = 0xFFFF,
358 .dst_port_mask = 0xFFFF,
359 .mac_addr_byte_mask = 0xFF,
360 .tunnel_type_mask = 1,
361 .tunnel_id_mask = 0xFFFFFFFF,
366 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
368 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
369 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
371 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
372 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
374 uint16_t nb_tx_queue_stats_mappings = 0;
375 uint16_t nb_rx_queue_stats_mappings = 0;
378 * Hide zero values for xstats display when set; zero values are displayed by default
380 uint8_t xstats_hide_zero;
382 unsigned int num_sockets = 0;
383 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
385 #ifdef RTE_LIBRTE_BITRATE
386 /* Bitrate statistics */
387 struct rte_stats_bitrates *bitrate_data;
388 lcoreid_t bitrate_lcore_id;
389 uint8_t bitrate_enabled;
392 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
393 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
395 /* Forward function declarations */
396 static void map_port_queue_stats_mapping_registers(portid_t pi,
397 struct rte_port *port);
398 static void check_all_ports_link_status(uint32_t port_mask);
399 static int eth_event_callback(portid_t port_id,
400 enum rte_eth_event_type type,
401 void *param, void *ret_param);
402 static void eth_dev_event_callback(char *device_name,
403 enum rte_dev_event_type type,
405 static int eth_dev_event_callback_register(void);
406 static int eth_dev_event_callback_unregister(void);
410 * Check if all the ports are started.
411 * If yes, return positive value. If not, return zero.
413 static int all_ports_started(void);
415 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
416 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
419 * Helper function to check if a socket ID has already been discovered.
420 * If the socket is new, return a positive value; otherwise return zero.
423 new_socket_id(unsigned int socket_id)
427 for (i = 0; i < num_sockets; i++) {
428 if (socket_ids[i] == socket_id)
435 * Setup default configuration.
438 set_default_fwd_lcores_config(void)
442 unsigned int sock_num;
445 for (i = 0; i < RTE_MAX_LCORE; i++) {
446 sock_num = rte_lcore_to_socket_id(i);
447 if (new_socket_id(sock_num)) {
448 if (num_sockets >= RTE_MAX_NUMA_NODES) {
449 rte_exit(EXIT_FAILURE,
450 "Total sockets greater than %u\n",
453 socket_ids[num_sockets++] = sock_num;
455 if (!rte_lcore_is_enabled(i))
457 if (i == rte_get_master_lcore())
459 fwd_lcores_cpuids[nb_lc++] = i;
461 nb_lcores = (lcoreid_t) nb_lc;
462 nb_cfg_lcores = nb_lcores;
467 set_def_peer_eth_addrs(void)
471 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
472 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
473 peer_eth_addrs[i].addr_bytes[5] = i;
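/*
 * ETHER_LOCAL_ADMIN_ADDR is 0x02, so peer port i defaults to the
 * locally administered address 02:00:00:00:00:<i>.
 */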
478 set_default_fwd_ports_config(void)
483 RTE_ETH_FOREACH_DEV(pt_id)
484 fwd_ports_ids[i++] = pt_id;
486 nb_cfg_ports = nb_ports;
487 nb_fwd_ports = nb_ports;
491 set_def_fwd_config(void)
493 set_default_fwd_lcores_config();
494 set_def_peer_eth_addrs();
495 set_default_fwd_ports_config();
499 * Configuration initialisation done once at init time.
502 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
503 unsigned int socket_id)
505 char pool_name[RTE_MEMPOOL_NAMESIZE];
506 struct rte_mempool *rte_mp = NULL;
509 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
510 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
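/* e.g. mbuf_poolname_build() turns socket 0 into a name such as "mbuf_pool_socket_0". */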
513 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
514 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
517 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
518 mb_size, (unsigned) mb_mempool_cache,
519 sizeof(struct rte_pktmbuf_pool_private),
524 if (rte_mempool_populate_anon(rte_mp) == 0) {
525 rte_mempool_free(rte_mp);
529 rte_pktmbuf_pool_init(rte_mp, NULL);
530 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
532 /* wrapper to rte_mempool_create() */
533 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
534 rte_mbuf_best_mempool_ops());
535 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
536 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
540 if (rte_mp == NULL) {
541 rte_exit(EXIT_FAILURE,
542 "Creation of mbuf pool for socket %u failed: %s\n",
543 socket_id, rte_strerror(rte_errno));
544 } else if (verbose_level > 0) {
545 rte_mempool_dump(stdout, rte_mp);
550 * Check whether the given socket ID is valid in NUMA mode;
551 * return 0 if valid, -1 otherwise.
554 check_socket_id(const unsigned int socket_id)
556 static int warning_once = 0;
558 if (new_socket_id(socket_id)) {
559 if (!warning_once && numa_support)
560 printf("Warning: NUMA should be configured manually by"
561 " using --port-numa-config and"
562 " --ring-numa-config parameters along with"
571 * Get the allowed maximum number of RX queues.
572 * *pid returns the port ID that has the minimal value of
573 * max_rx_queues among all ports.
576 get_allowed_max_nb_rxq(portid_t *pid)
578 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
580 struct rte_eth_dev_info dev_info;
582 RTE_ETH_FOREACH_DEV(pi) {
583 rte_eth_dev_info_get(pi, &dev_info);
584 if (dev_info.max_rx_queues < allowed_max_rxq) {
585 allowed_max_rxq = dev_info.max_rx_queues;
589 return allowed_max_rxq;
593 * Check whether the given rxq is valid.
594 * It is valid if it does not exceed the maximum number of
595 * RX queues supported by every port.
596 * Return 0 if valid, -1 otherwise.
599 check_nb_rxq(queueid_t rxq)
601 queueid_t allowed_max_rxq;
604 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
605 if (rxq > allowed_max_rxq) {
606 printf("Fail: input rxq (%u) can't be greater "
607 "than max_rx_queues (%u) of port %u\n",
617 * Get the allowed maximum number of TX queues.
618 * *pid returns the port ID that has the minimal value of
619 * max_tx_queues among all ports.
622 get_allowed_max_nb_txq(portid_t *pid)
624 queueid_t allowed_max_txq = MAX_QUEUE_ID;
626 struct rte_eth_dev_info dev_info;
628 RTE_ETH_FOREACH_DEV(pi) {
629 rte_eth_dev_info_get(pi, &dev_info);
630 if (dev_info.max_tx_queues < allowed_max_txq) {
631 allowed_max_txq = dev_info.max_tx_queues;
635 return allowed_max_txq;
639 * Check whether the given txq is valid.
640 * It is valid if it does not exceed the maximum number of
641 * TX queues supported by every port.
642 * Return 0 if valid, -1 otherwise.
645 check_nb_txq(queueid_t txq)
647 queueid_t allowed_max_txq;
650 allowed_max_txq = get_allowed_max_nb_txq(&pid);
651 if (txq > allowed_max_txq) {
652 printf("Fail: input txq (%u) can't be greater "
653 "than max_tx_queues (%u) of port %u\n",
666 struct rte_port *port;
667 struct rte_mempool *mbp;
668 unsigned int nb_mbuf_per_pool;
670 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
671 struct rte_gro_param gro_param;
675 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
678 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
679 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
680 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
683 /* Configuration of logical cores. */
684 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
685 sizeof(struct fwd_lcore *) * nb_lcores,
686 RTE_CACHE_LINE_SIZE);
687 if (fwd_lcores == NULL) {
688 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
689 "failed\n", nb_lcores);
691 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
692 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
693 sizeof(struct fwd_lcore),
694 RTE_CACHE_LINE_SIZE);
695 if (fwd_lcores[lc_id] == NULL) {
696 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
699 fwd_lcores[lc_id]->cpuid_idx = lc_id;
702 RTE_ETH_FOREACH_DEV(pid) {
704 /* Apply default TxRx configuration for all ports */
705 port->dev_conf.txmode = tx_mode;
706 port->dev_conf.rxmode = rx_mode;
707 rte_eth_dev_info_get(pid, &port->dev_info);
709 if (!(port->dev_info.rx_offload_capa &
710 DEV_RX_OFFLOAD_CRC_STRIP))
711 port->dev_conf.rxmode.offloads &=
712 ~DEV_RX_OFFLOAD_CRC_STRIP;
713 if (!(port->dev_info.tx_offload_capa &
714 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
715 port->dev_conf.txmode.offloads &=
716 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
718 if (port_numa[pid] != NUMA_NO_CONFIG)
719 port_per_socket[port_numa[pid]]++;
721 uint32_t socket_id = rte_eth_dev_socket_id(pid);
723 /* if socket_id is invalid, set to 0 */
724 if (check_socket_id(socket_id) < 0)
726 port_per_socket[socket_id]++;
730 /* Apply Rx offloads configuration */
731 for (k = 0; k < port->dev_info.max_rx_queues; k++)
732 port->rx_conf[k].offloads =
733 port->dev_conf.rxmode.offloads;
734 /* Apply Tx offloads configuration */
735 for (k = 0; k < port->dev_info.max_tx_queues; k++)
736 port->tx_conf[k].offloads =
737 port->dev_conf.txmode.offloads;
739 /* set flag to initialize port/queue */
740 port->need_reconfig = 1;
741 port->need_reconfig_queues = 1;
745 * Create mbuf pools.
746 * If NUMA support is disabled, create a single mbuf pool in
747 * socket 0 memory by default.
748 * Otherwise, create an mbuf pool in the memory of each of sockets 0 and 1.
750 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
751 * nb_txd can be configured at run time.
753 if (param_total_num_mbufs)
754 nb_mbuf_per_pool = param_total_num_mbufs;
756 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
757 (nb_lcores * mb_mempool_cache) +
758 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
759 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
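/*
 * Rough sizing example, assuming the testpmd defaults
 * RTE_TEST_RX_DESC_MAX == RTE_TEST_TX_DESC_MAX == 2048,
 * MAX_PKT_BURST == 512 and mb_mempool_cache == 250: with 4 lcores,
 * 2048 + (4 * 250) + 2048 + 512 = 5608 mbufs per port slot, scaled
 * by RTE_MAX_ETHPORTS.
 */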
765 for (i = 0; i < num_sockets; i++)
766 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
769 if (socket_num == UMA_NO_CONFIG)
770 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
772 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
778 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
779 DEV_TX_OFFLOAD_GRE_TNL_TSO;
781 * Record which mbuf pool each logical core should use, if needed.
783 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
784 mbp = mbuf_pool_find(
785 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
788 mbp = mbuf_pool_find(0);
789 fwd_lcores[lc_id]->mbp = mbp;
790 /* initialize GSO context */
791 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
792 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
793 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
794 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
796 fwd_lcores[lc_id]->gso_ctx.flag = 0;
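/*
 * As with gso_max_segment_size above, the default gso_size is a full
 * Ethernet frame minus CRC: ETHER_MAX_LEN - ETHER_CRC_LEN =
 * 1518 - 4 = 1514 bytes.
 */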
799 /* Configuration of packet forwarding streams. */
800 if (init_fwd_streams() < 0)
801 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
805 /* create a gro context for each lcore */
806 gro_param.gro_types = RTE_GRO_TCP_IPV4;
807 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
808 gro_param.max_item_per_flow = MAX_PKT_BURST;
809 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
810 gro_param.socket_id = rte_lcore_to_socket_id(
811 fwd_lcores_cpuids[lc_id]);
812 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
813 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
814 rte_exit(EXIT_FAILURE,
815 "rte_gro_ctx_create() failed\n");
822 reconfig(portid_t new_port_id, unsigned socket_id)
824 struct rte_port *port;
826 /* Reconfiguration of Ethernet ports. */
827 port = &ports[new_port_id];
828 rte_eth_dev_info_get(new_port_id, &port->dev_info);
830 /* set flag to initialize port/queue */
831 port->need_reconfig = 1;
832 port->need_reconfig_queues = 1;
833 port->socket_id = socket_id;
840 init_fwd_streams(void)
843 struct rte_port *port;
844 streamid_t sm_id, nb_fwd_streams_new;
847 /* set socket ID according to NUMA mode */
848 RTE_ETH_FOREACH_DEV(pid) {
850 if (nb_rxq > port->dev_info.max_rx_queues) {
851 printf("Fail: nb_rxq(%d) is greater than "
852 "max_rx_queues(%d)\n", nb_rxq,
853 port->dev_info.max_rx_queues);
856 if (nb_txq > port->dev_info.max_tx_queues) {
857 printf("Fail: nb_txq(%d) is greater than "
858 "max_tx_queues(%d)\n", nb_txq,
859 port->dev_info.max_tx_queues);
863 if (port_numa[pid] != NUMA_NO_CONFIG)
864 port->socket_id = port_numa[pid];
866 port->socket_id = rte_eth_dev_socket_id(pid);
868 /* if socket_id is invalid, set to 0 */
869 if (check_socket_id(port->socket_id) < 0)
874 if (socket_num == UMA_NO_CONFIG)
877 port->socket_id = socket_num;
881 q = RTE_MAX(nb_rxq, nb_txq);
883 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
886 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
887 if (nb_fwd_streams_new == nb_fwd_streams)
890 if (fwd_streams != NULL) {
891 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
892 if (fwd_streams[sm_id] == NULL)
894 rte_free(fwd_streams[sm_id]);
895 fwd_streams[sm_id] = NULL;
897 rte_free(fwd_streams);
902 nb_fwd_streams = nb_fwd_streams_new;
903 if (nb_fwd_streams) {
904 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
905 sizeof(struct fwd_stream *) * nb_fwd_streams,
906 RTE_CACHE_LINE_SIZE);
907 if (fwd_streams == NULL)
908 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
909 " (struct fwd_stream *)) failed\n",
912 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
913 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
914 " struct fwd_stream", sizeof(struct fwd_stream),
915 RTE_CACHE_LINE_SIZE);
916 if (fwd_streams[sm_id] == NULL)
917 rte_exit(EXIT_FAILURE, "rte_zmalloc"
918 "(struct fwd_stream) failed\n");
925 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
927 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
929 unsigned int total_burst;
930 unsigned int nb_burst;
931 unsigned int burst_stats[3];
932 uint16_t pktnb_stats[3];
934 int burst_percent[3];
937 * First compute the total number of packet bursts and the
938 * two highest numbers of bursts of the same number of packets.
941 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
942 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
943 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
944 nb_burst = pbs->pkt_burst_spread[nb_pkt];
947 total_burst += nb_burst;
948 if (nb_burst > burst_stats[0]) {
949 burst_stats[1] = burst_stats[0];
950 pktnb_stats[1] = pktnb_stats[0];
951 burst_stats[0] = nb_burst;
952 pktnb_stats[0] = nb_pkt;
953 } else if (nb_burst > burst_stats[1]) {
954 burst_stats[1] = nb_burst;
955 pktnb_stats[1] = nb_pkt;
958 if (total_burst == 0)
960 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
961 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
962 burst_percent[0], (int) pktnb_stats[0]);
963 if (burst_stats[0] == total_burst) {
967 if (burst_stats[0] + burst_stats[1] == total_burst) {
968 printf(" + %d%% of %d pkts]\n",
969 100 - burst_percent[0], pktnb_stats[1]);
972 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
973 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
974 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
975 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
978 printf(" + %d%% of %d pkts + %d%% of others]\n",
979 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
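/*
 * Sample output (illustrative values only):
 *   RX-bursts : 1024 [90% of 32 pkts + 7% of 16 pkts + 3% of others]
 */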
981 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
984 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
986 struct rte_port *port;
989 static const char *fwd_stats_border = "----------------------";
991 port = &ports[port_id];
992 printf("\n %s Forward statistics for port %-2d %s\n",
993 fwd_stats_border, port_id, fwd_stats_border);
995 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
996 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
998 stats->ipackets, stats->imissed,
999 (uint64_t) (stats->ipackets + stats->imissed));
1001 if (cur_fwd_eng == &csum_fwd_engine)
1002 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
1003 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1004 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1005 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
1006 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
1009 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1011 stats->opackets, port->tx_dropped,
1012 (uint64_t) (stats->opackets + port->tx_dropped));
1015 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
1017 stats->ipackets, stats->imissed,
1018 (uint64_t) (stats->ipackets + stats->imissed));
1020 if (cur_fwd_eng == &csum_fwd_engine)
1021 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
1022 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1023 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1024 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
1025 printf(" RX-nombufs: %14"PRIu64"\n",
1029 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
1031 stats->opackets, port->tx_dropped,
1032 (uint64_t) (stats->opackets + port->tx_dropped));
1035 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1036 if (port->rx_stream)
1037 pkt_burst_stats_display("RX",
1038 &port->rx_stream->rx_burst_stats);
1039 if (port->tx_stream)
1040 pkt_burst_stats_display("TX",
1041 &port->tx_stream->tx_burst_stats);
1044 if (port->rx_queue_stats_mapping_enabled) {
1046 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1047 printf(" Stats reg %2d RX-packets:%14"PRIu64
1048 " RX-errors:%14"PRIu64
1049 " RX-bytes:%14"PRIu64"\n",
1050 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1054 if (port->tx_queue_stats_mapping_enabled) {
1055 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1056 printf(" Stats reg %2d TX-packets:%14"PRIu64
1057 " TX-bytes:%14"PRIu64"\n",
1058 i, stats->q_opackets[i], stats->q_obytes[i]);
1062 printf(" %s--------------------------------%s\n",
1063 fwd_stats_border, fwd_stats_border);
1067 fwd_stream_stats_display(streamid_t stream_id)
1069 struct fwd_stream *fs;
1070 static const char *fwd_top_stats_border = "-------";
1072 fs = fwd_streams[stream_id];
1073 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1074 (fs->fwd_dropped == 0))
1076 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1077 "TX Port=%2d/Queue=%2d %s\n",
1078 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1079 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1080 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1081 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1083 /* if checksum mode */
1084 if (cur_fwd_eng == &csum_fwd_engine) {
1085 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
1086 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1089 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1090 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1091 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1096 flush_fwd_rx_queues(void)
1098 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1105 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1106 uint64_t timer_period;
1108 /* convert to number of cycles */
1109 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1111 for (j = 0; j < 2; j++) {
1112 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1113 for (rxq = 0; rxq < nb_rxq; rxq++) {
1114 port_id = fwd_ports_ids[rxp];
1116 * testpmd can get stuck in the do-while loop below
1117 * if rte_eth_rx_burst() always returns nonzero
1118 * packets. A timer is therefore added to exit this
1119 * loop after a 1-second timeout.
1121 prev_tsc = rte_rdtsc();
1123 nb_rx = rte_eth_rx_burst(port_id, rxq,
1124 pkts_burst, MAX_PKT_BURST);
1125 for (i = 0; i < nb_rx; i++)
1126 rte_pktmbuf_free(pkts_burst[i]);
1128 cur_tsc = rte_rdtsc();
1129 diff_tsc = cur_tsc - prev_tsc;
1130 timer_tsc += diff_tsc;
1131 } while ((nb_rx > 0) &&
1132 (timer_tsc < timer_period));
1136 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1141 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1143 struct fwd_stream **fsm;
1146 #ifdef RTE_LIBRTE_BITRATE
1147 uint64_t tics_per_1sec;
1148 uint64_t tics_datum;
1149 uint64_t tics_current;
1152 tics_datum = rte_rdtsc();
1153 tics_per_1sec = rte_get_timer_hz();
1155 fsm = &fwd_streams[fc->stream_idx];
1156 nb_fs = fc->stream_nb;
1158 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1159 (*pkt_fwd)(fsm[sm_id]);
1160 #ifdef RTE_LIBRTE_BITRATE
1161 if (bitrate_enabled != 0 &&
1162 bitrate_lcore_id == rte_lcore_id()) {
1163 tics_current = rte_rdtsc();
1164 if (tics_current - tics_datum >= tics_per_1sec) {
1165 /* Periodic bitrate calculation */
1166 RTE_ETH_FOREACH_DEV(idx_port)
1167 rte_stats_bitrate_calc(bitrate_data,
1169 tics_datum = tics_current;
1173 #ifdef RTE_LIBRTE_LATENCY_STATS
1174 if (latencystats_enabled != 0 &&
1175 latencystats_lcore_id == rte_lcore_id())
1176 rte_latencystats_update();
1179 } while (!fc->stopped);
1183 start_pkt_forward_on_core(void *fwd_arg)
1185 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1186 cur_fwd_config.fwd_eng->packet_fwd);
1191 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1192 * Used to start communication flows in network loopback test configurations.
1195 run_one_txonly_burst_on_core(void *fwd_arg)
1197 struct fwd_lcore *fwd_lc;
1198 struct fwd_lcore tmp_lcore;
1200 fwd_lc = (struct fwd_lcore *) fwd_arg;
1201 tmp_lcore = *fwd_lc;
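/*
 * The copied context starts in the "stopped" state, so the do-while
 * loop in run_pkt_fwd_on_lcore() exits after a single pass: exactly
 * one burst is sent on each stream handled by this lcore.
 */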
1202 tmp_lcore.stopped = 1;
1203 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1208 * Launch packet forwarding:
1209 * - Setup per-port forwarding context.
1210 * - Launch logical cores with their forwarding configuration.
1213 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1215 port_fwd_begin_t port_fwd_begin;
1220 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1221 if (port_fwd_begin != NULL) {
1222 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1223 (*port_fwd_begin)(fwd_ports_ids[i]);
1225 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1226 lc_id = fwd_lcores_cpuids[i];
1227 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1228 fwd_lcores[i]->stopped = 0;
1229 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1230 fwd_lcores[i], lc_id);
1232 printf("launch lcore %u failed - diag=%d\n",
1239 * Update the forward ports list.
1242 update_fwd_ports(portid_t new_pid)
1245 unsigned int new_nb_fwd_ports = 0;
1248 for (i = 0; i < nb_fwd_ports; ++i) {
1249 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1252 fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1256 if (new_pid < RTE_MAX_ETHPORTS)
1257 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1259 nb_fwd_ports = new_nb_fwd_ports;
1260 nb_cfg_ports = new_nb_fwd_ports;
1264 * Launch packet forwarding configuration.
1267 start_packet_forwarding(int with_tx_first)
1269 port_fwd_begin_t port_fwd_begin;
1270 port_fwd_end_t port_fwd_end;
1271 struct rte_port *port;
1276 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1277 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1279 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1280 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1282 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1283 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1284 (!nb_rxq || !nb_txq))
1285 rte_exit(EXIT_FAILURE,
1286 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1287 cur_fwd_eng->fwd_mode_name);
1289 if (all_ports_started() == 0) {
1290 printf("Not all ports were started\n");
1293 if (test_done == 0) {
1294 printf("Packet forwarding already started\n");
1300 for (i = 0; i < nb_fwd_ports; i++) {
1301 pt_id = fwd_ports_ids[i];
1302 port = &ports[pt_id];
1303 if (!port->dcb_flag) {
1304 printf("In DCB mode, all forwarding ports must "
1305 "be configured in this mode.\n");
1309 if (nb_fwd_lcores == 1) {
1310 printf("In DCB mode,the nb forwarding cores "
1311 "should be larger than 1.\n");
1320 flush_fwd_rx_queues();
1322 pkt_fwd_config_display(&cur_fwd_config);
1323 rxtx_config_display();
1325 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1326 pt_id = fwd_ports_ids[i];
1327 port = &ports[pt_id];
1328 rte_eth_stats_get(pt_id, &port->stats);
1329 port->tx_dropped = 0;
1331 map_port_queue_stats_mapping_registers(pt_id, port);
1333 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1334 fwd_streams[sm_id]->rx_packets = 0;
1335 fwd_streams[sm_id]->tx_packets = 0;
1336 fwd_streams[sm_id]->fwd_dropped = 0;
1337 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1338 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1340 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1341 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1342 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1343 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1344 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1346 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1347 fwd_streams[sm_id]->core_cycles = 0;
1350 if (with_tx_first) {
1351 port_fwd_begin = tx_only_engine.port_fwd_begin;
1352 if (port_fwd_begin != NULL) {
1353 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1354 (*port_fwd_begin)(fwd_ports_ids[i]);
1356 while (with_tx_first--) {
1357 launch_packet_forwarding(
1358 run_one_txonly_burst_on_core);
1359 rte_eal_mp_wait_lcore();
1361 port_fwd_end = tx_only_engine.port_fwd_end;
1362 if (port_fwd_end != NULL) {
1363 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1364 (*port_fwd_end)(fwd_ports_ids[i]);
1367 launch_packet_forwarding(start_pkt_forward_on_core);
1371 stop_packet_forwarding(void)
1373 struct rte_eth_stats stats;
1374 struct rte_port *port;
1375 port_fwd_end_t port_fwd_end;
1380 uint64_t total_recv;
1381 uint64_t total_xmit;
1382 uint64_t total_rx_dropped;
1383 uint64_t total_tx_dropped;
1384 uint64_t total_rx_nombuf;
1385 uint64_t tx_dropped;
1386 uint64_t rx_bad_ip_csum;
1387 uint64_t rx_bad_l4_csum;
1388 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1389 uint64_t fwd_cycles;
1392 static const char *acc_stats_border = "+++++++++++++++";
1395 printf("Packet forwarding not started\n");
1398 printf("Telling cores to stop...");
1399 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1400 fwd_lcores[lc_id]->stopped = 1;
1401 printf("\nWaiting for lcores to finish...\n");
1402 rte_eal_mp_wait_lcore();
1403 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1404 if (port_fwd_end != NULL) {
1405 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1406 pt_id = fwd_ports_ids[i];
1407 (*port_fwd_end)(pt_id);
1410 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1413 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1414 if (cur_fwd_config.nb_fwd_streams >
1415 cur_fwd_config.nb_fwd_ports) {
1416 fwd_stream_stats_display(sm_id);
1417 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1418 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1420 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1422 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1425 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1426 tx_dropped = (uint64_t) (tx_dropped +
1427 fwd_streams[sm_id]->fwd_dropped);
1428 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1431 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1432 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1433 fwd_streams[sm_id]->rx_bad_ip_csum);
1434 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1438 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1439 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1440 fwd_streams[sm_id]->rx_bad_l4_csum);
1441 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1444 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1445 fwd_cycles = (uint64_t) (fwd_cycles +
1446 fwd_streams[sm_id]->core_cycles);
1451 total_rx_dropped = 0;
1452 total_tx_dropped = 0;
1453 total_rx_nombuf = 0;
1454 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1455 pt_id = fwd_ports_ids[i];
1457 port = &ports[pt_id];
1458 rte_eth_stats_get(pt_id, &stats);
1459 stats.ipackets -= port->stats.ipackets;
1460 port->stats.ipackets = 0;
1461 stats.opackets -= port->stats.opackets;
1462 port->stats.opackets = 0;
1463 stats.ibytes -= port->stats.ibytes;
1464 port->stats.ibytes = 0;
1465 stats.obytes -= port->stats.obytes;
1466 port->stats.obytes = 0;
1467 stats.imissed -= port->stats.imissed;
1468 port->stats.imissed = 0;
1469 stats.oerrors -= port->stats.oerrors;
1470 port->stats.oerrors = 0;
1471 stats.rx_nombuf -= port->stats.rx_nombuf;
1472 port->stats.rx_nombuf = 0;
1474 total_recv += stats.ipackets;
1475 total_xmit += stats.opackets;
1476 total_rx_dropped += stats.imissed;
1477 total_tx_dropped += port->tx_dropped;
1478 total_rx_nombuf += stats.rx_nombuf;
1480 fwd_port_stats_display(pt_id, &stats);
1483 printf("\n %s Accumulated forward statistics for all ports"
1485 acc_stats_border, acc_stats_border);
1486 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1488 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1490 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1491 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1492 if (total_rx_nombuf > 0)
1493 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1494 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1496 acc_stats_border, acc_stats_border);
1497 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1499 printf("\n CPU cycles/packet=%u (total cycles="
1500 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1501 (unsigned int)(fwd_cycles / total_recv),
1502 fwd_cycles, total_recv);
1504 printf("\nDone.\n");
1509 dev_set_link_up(portid_t pid)
1511 if (rte_eth_dev_set_link_up(pid) < 0)
1512 printf("\nSet link up fail.\n");
1516 dev_set_link_down(portid_t pid)
1518 if (rte_eth_dev_set_link_down(pid) < 0)
1519 printf("\nSet link down fail.\n");
1523 all_ports_started(void)
1526 struct rte_port *port;
1528 RTE_ETH_FOREACH_DEV(pi) {
1530 /* Check if there is a port which is not started */
1531 if ((port->port_status != RTE_PORT_STARTED) &&
1532 (port->slave_flag == 0))
1536 /* No port is not started */
1541 port_is_stopped(portid_t port_id)
1543 struct rte_port *port = &ports[port_id];
1545 if ((port->port_status != RTE_PORT_STOPPED) &&
1546 (port->slave_flag == 0))
1552 all_ports_stopped(void)
1556 RTE_ETH_FOREACH_DEV(pi) {
1557 if (!port_is_stopped(pi))
1565 port_is_started(portid_t port_id)
1567 if (port_id_is_invalid(port_id, ENABLED_WARN))
1570 if (ports[port_id].port_status != RTE_PORT_STARTED)
1577 port_is_closed(portid_t port_id)
1579 if (port_id_is_invalid(port_id, ENABLED_WARN))
1582 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1589 start_port(portid_t pid)
1591 int diag, need_check_link_status = -1;
1594 struct rte_port *port;
1595 struct ether_addr mac_addr;
1596 enum rte_eth_event_type event_type;
1598 if (port_id_is_invalid(pid, ENABLED_WARN))
1603 RTE_ETH_FOREACH_DEV(pi) {
1604 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1607 need_check_link_status = 0;
1609 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1610 RTE_PORT_HANDLING) == 0) {
1611 printf("Port %d is now not stopped\n", pi);
1615 if (port->need_reconfig > 0) {
1616 port->need_reconfig = 0;
1618 if (flow_isolate_all) {
1619 int ret = port_flow_isolate(pi, 1);
1621 printf("Failed to apply isolated"
1622 " mode on port %d\n", pi);
1627 printf("Configuring Port %d (socket %u)\n", pi,
1629 /* configure port */
1630 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1633 if (rte_atomic16_cmpset(&(port->port_status),
1634 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1635 printf("Port %d can not be set back "
1636 "to stopped\n", pi);
1637 printf("Fail to configure port %d\n", pi);
1638 /* try to reconfigure port next time */
1639 port->need_reconfig = 1;
1643 if (port->need_reconfig_queues > 0) {
1644 port->need_reconfig_queues = 0;
1645 /* setup tx queues */
1646 for (qi = 0; qi < nb_txq; qi++) {
1647 if ((numa_support) &&
1648 (txring_numa[pi] != NUMA_NO_CONFIG))
1649 diag = rte_eth_tx_queue_setup(pi, qi,
1650 port->nb_tx_desc[qi],
1652 &(port->tx_conf[qi]));
1654 diag = rte_eth_tx_queue_setup(pi, qi,
1655 port->nb_tx_desc[qi],
1657 &(port->tx_conf[qi]));
1662 /* Fail to setup tx queue, return */
1663 if (rte_atomic16_cmpset(&(port->port_status),
1665 RTE_PORT_STOPPED) == 0)
1666 printf("Port %d can not be set back "
1667 "to stopped\n", pi);
1668 printf("Fail to configure port %d tx queues\n",
1670 /* try to reconfigure queues next time */
1671 port->need_reconfig_queues = 1;
1674 for (qi = 0; qi < nb_rxq; qi++) {
1675 /* setup rx queues */
1676 if ((numa_support) &&
1677 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1678 struct rte_mempool * mp =
1679 mbuf_pool_find(rxring_numa[pi]);
1681 printf("Failed to setup RX queue:"
1682 "No mempool allocation"
1683 " on the socket %d\n",
1688 diag = rte_eth_rx_queue_setup(pi, qi,
1689 port->nb_rx_desc[qi],
1691 &(port->rx_conf[qi]),
1694 struct rte_mempool *mp =
1695 mbuf_pool_find(port->socket_id);
1697 printf("Failed to setup RX queue:"
1698 "No mempool allocation"
1699 " on the socket %d\n",
1703 diag = rte_eth_rx_queue_setup(pi, qi,
1704 port->nb_rx_desc[qi],
1706 &(port->rx_conf[qi]),
1712 /* Fail to setup rx queue, return */
1713 if (rte_atomic16_cmpset(&(port->port_status),
1715 RTE_PORT_STOPPED) == 0)
1716 printf("Port %d can not be set back "
1717 "to stopped\n", pi);
1718 printf("Fail to configure port %d rx queues\n",
1720 /* try to reconfigure queues next time */
1721 port->need_reconfig_queues = 1;
1727 if (rte_eth_dev_start(pi) < 0) {
1728 printf("Fail to start port %d\n", pi);
1730 /* Fail to setup rx queue, return */
1731 if (rte_atomic16_cmpset(&(port->port_status),
1732 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1733 printf("Port %d can not be set back to "
1738 if (rte_atomic16_cmpset(&(port->port_status),
1739 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1740 printf("Port %d can not be set into started\n", pi);
1742 rte_eth_macaddr_get(pi, &mac_addr);
1743 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1744 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1745 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1746 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1748 /* at least one port started, need to check link status */
1749 need_check_link_status = 1;
1752 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1753 event_type < RTE_ETH_EVENT_MAX;
1755 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1760 printf("Failed to setup even callback for event %d\n",
1766 if (need_check_link_status == 1 && !no_link_check)
1767 check_all_ports_link_status(RTE_PORT_ALL);
1768 else if (need_check_link_status == 0)
1769 printf("Please stop the ports first\n");
1776 stop_port(portid_t pid)
1779 struct rte_port *port;
1780 int need_check_link_status = 0;
1787 if (port_id_is_invalid(pid, ENABLED_WARN))
1790 printf("Stopping ports...\n");
1792 RTE_ETH_FOREACH_DEV(pi) {
1793 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1796 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1797 printf("Please remove port %d from forwarding configuration.\n", pi);
1801 if (port_is_bonding_slave(pi)) {
1802 printf("Please remove port %d from bonded device.\n", pi);
1807 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1808 RTE_PORT_HANDLING) == 0)
1811 rte_eth_dev_stop(pi);
1813 if (rte_atomic16_cmpset(&(port->port_status),
1814 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1815 printf("Port %d can not be set into stopped\n", pi);
1816 need_check_link_status = 1;
1818 if (need_check_link_status && !no_link_check)
1819 check_all_ports_link_status(RTE_PORT_ALL);
1825 close_port(portid_t pid)
1828 struct rte_port *port;
1830 if (port_id_is_invalid(pid, ENABLED_WARN))
1833 printf("Closing ports...\n");
1835 RTE_ETH_FOREACH_DEV(pi) {
1836 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1839 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1840 printf("Please remove port %d from forwarding configuration.\n", pi);
1844 if (port_is_bonding_slave(pi)) {
1845 printf("Please remove port %d from bonded device.\n", pi);
1850 if (rte_atomic16_cmpset(&(port->port_status),
1851 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1852 printf("Port %d is already closed\n", pi);
1856 if (rte_atomic16_cmpset(&(port->port_status),
1857 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1858 printf("Port %d is now not stopped\n", pi);
1862 if (port->flow_list)
1863 port_flow_flush(pi);
1864 rte_eth_dev_close(pi);
1866 if (rte_atomic16_cmpset(&(port->port_status),
1867 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1868 printf("Port %d cannot be set to closed\n", pi);
1875 reset_port(portid_t pid)
1879 struct rte_port *port;
1881 if (port_id_is_invalid(pid, ENABLED_WARN))
1884 printf("Resetting ports...\n");
1886 RTE_ETH_FOREACH_DEV(pi) {
1887 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1890 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1891 printf("Please remove port %d from forwarding "
1892 "configuration.\n", pi);
1896 if (port_is_bonding_slave(pi)) {
1897 printf("Please remove port %d from bonded device.\n",
1902 diag = rte_eth_dev_reset(pi);
1905 port->need_reconfig = 1;
1906 port->need_reconfig_queues = 1;
1908 printf("Failed to reset port %d. diag=%d\n", pi, diag);
1916 eth_dev_event_callback_register(void)
1920 /* register the device event callback */
1921 ret = rte_dev_event_callback_register(NULL,
1922 eth_dev_event_callback, NULL);
1924 printf("Failed to register device event callback\n");
1933 eth_dev_event_callback_unregister(void)
1937 /* unregister the device event callback */
1938 ret = rte_dev_event_callback_unregister(NULL,
1939 eth_dev_event_callback, NULL);
1941 printf("Failed to unregister device event callback\n");
1949 attach_port(char *identifier)
1952 unsigned int socket_id;
1954 printf("Attaching a new port...\n");
1956 if (identifier == NULL) {
1957 printf("Invalid parameters are specified\n");
1961 if (rte_eth_dev_attach(identifier, &pi))
1964 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1965 /* if socket_id is invalid, set to 0 */
1966 if (check_socket_id(socket_id) < 0)
1968 reconfig(pi, socket_id);
1969 rte_eth_promiscuous_enable(pi);
1971 nb_ports = rte_eth_dev_count_avail();
1973 ports[pi].port_status = RTE_PORT_STOPPED;
1975 update_fwd_ports(pi);
1977 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1982 detach_port(portid_t port_id)
1984 char name[RTE_ETH_NAME_MAX_LEN];
1986 printf("Detaching a port...\n");
1988 if (!port_is_closed(port_id)) {
1989 printf("Please close port first\n");
1993 if (ports[port_id].flow_list)
1994 port_flow_flush(port_id);
1996 if (rte_eth_dev_detach(port_id, name)) {
1997 TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2001 nb_ports = rte_eth_dev_count_avail();
2003 update_fwd_ports(RTE_MAX_ETHPORTS);
2005 printf("Port %u is detached. Now total ports is %d\n",
2014 struct rte_device *device;
2019 stop_packet_forwarding();
2021 if (ports != NULL) {
2023 RTE_ETH_FOREACH_DEV(pt_id) {
2024 printf("\nShutting down port %d...\n", pt_id);
2030 * This is a workaround for a virtio-user issue that
2031 * requires calling the clean-up routine to remove existing
2033 * This workaround is valid only for testpmd; a fix
2034 * valid for all applications is needed.
2035 * TODO: Implement proper resource cleanup
2037 device = rte_eth_devices[pt_id].device;
2038 if (device && !strcmp(device->driver->name, "net_virtio_user"))
2044 ret = rte_dev_event_monitor_stop();
2047 "fail to stop device event monitor.");
2049 ret = eth_dev_event_callback_unregister();
2052 "fail to unregister all event callbacks.");
2055 printf("\nBye...\n");
2058 typedef void (*cmd_func_t)(void);
2059 struct pmd_test_command {
2060 const char *cmd_name;
2061 cmd_func_t cmd_func;
2064 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2066 /* Check the link status of all ports for up to 9 s, and finally print it */
2068 check_all_ports_link_status(uint32_t port_mask)
2070 #define CHECK_INTERVAL 100 /* 100ms */
2071 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2073 uint8_t count, all_ports_up, print_flag = 0;
2074 struct rte_eth_link link;
2076 printf("Checking link statuses...\n");
2078 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2080 RTE_ETH_FOREACH_DEV(portid) {
2081 if ((port_mask & (1 << portid)) == 0)
2083 memset(&link, 0, sizeof(link));
2084 rte_eth_link_get_nowait(portid, &link);
2085 /* print link status if flag set */
2086 if (print_flag == 1) {
2087 if (link.link_status)
2089 "Port%d Link Up. speed %u Mbps- %s\n",
2090 portid, link.link_speed,
2091 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2092 ("full-duplex") : ("half-duplex\n"));
2094 printf("Port %d Link Down\n", portid);
2097 /* clear all_ports_up flag if any link down */
2098 if (link.link_status == ETH_LINK_DOWN) {
2103 /* after finally printing all link status, get out */
2104 if (print_flag == 1)
2107 if (all_ports_up == 0) {
2109 rte_delay_ms(CHECK_INTERVAL);
2112 /* set the print_flag if all ports up or timeout */
2113 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2123 rmv_event_callback(void *arg)
2125 int need_to_start = 0;
2126 int org_no_link_check = no_link_check;
2127 portid_t port_id = (intptr_t)arg;
2129 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2131 if (!test_done && port_is_forwarding(port_id)) {
2133 stop_packet_forwarding();
2137 no_link_check = org_no_link_check;
2138 close_port(port_id);
2139 detach_port(port_id);
2141 start_packet_forwarding(0);
2144 /* This function is used by the interrupt thread */
2146 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2149 static const char * const event_desc[] = {
2150 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2151 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2152 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2153 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2154 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2155 [RTE_ETH_EVENT_IPSEC] = "IPsec",
2156 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2157 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2158 [RTE_ETH_EVENT_NEW] = "device probed",
2159 [RTE_ETH_EVENT_DESTROY] = "device released",
2160 [RTE_ETH_EVENT_MAX] = NULL,
2163 RTE_SET_USED(param);
2164 RTE_SET_USED(ret_param);
2166 if (type >= RTE_ETH_EVENT_MAX) {
2167 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2168 port_id, __func__, type);
2170 } else if (event_print_mask & (UINT32_C(1) << type)) {
2171 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2176 if (port_id_is_invalid(port_id, DISABLED_WARN))
2180 case RTE_ETH_EVENT_INTR_RMV:
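/*
 * Defer the actual removal handling by 100 ms (100000 us) via an
 * EAL alarm so it runs outside this interrupt-thread callback.
 */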
2181 if (rte_eal_alarm_set(100000,
2182 rmv_event_callback, (void *)(intptr_t)port_id))
2183 fprintf(stderr, "Could not set up deferred device removal\n");
2191 /* This function is used by the interrupt thread */
2193 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2194 __rte_unused void *arg)
2196 if (type >= RTE_DEV_EVENT_MAX) {
2197 fprintf(stderr, "%s called upon invalid event %d\n",
2203 case RTE_DEV_EVENT_REMOVE:
2204 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2206 /* TODO: After the failure is handled, begin to stop
2207 * packet forwarding, stop the port, close the port, detach the port.
2210 case RTE_DEV_EVENT_ADD:
2211 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2213 /* TODO: After the kernel driver binding finishes,
2214 * begin to attach the port.
2223 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2227 uint8_t mapping_found = 0;
2229 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2230 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2231 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2232 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2233 tx_queue_stats_mappings[i].queue_id,
2234 tx_queue_stats_mappings[i].stats_counter_id);
2241 port->tx_queue_stats_mapping_enabled = 1;
2246 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2250 uint8_t mapping_found = 0;
2252 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2253 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2254 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2255 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2256 rx_queue_stats_mappings[i].queue_id,
2257 rx_queue_stats_mappings[i].stats_counter_id);
2264 port->rx_queue_stats_mapping_enabled = 1;
2269 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2273 diag = set_tx_queue_stats_mapping_registers(pi, port);
2275 if (diag == -ENOTSUP) {
2276 port->tx_queue_stats_mapping_enabled = 0;
2277 printf("TX queue stats mapping not supported port id=%d\n", pi);
2280 rte_exit(EXIT_FAILURE,
2281 "set_tx_queue_stats_mapping_registers "
2282 "failed for port id=%d diag=%d\n",
2286 diag = set_rx_queue_stats_mapping_registers(pi, port);
2288 if (diag == -ENOTSUP) {
2289 port->rx_queue_stats_mapping_enabled = 0;
2290 printf("RX queue stats mapping not supported port id=%d\n", pi);
2293 rte_exit(EXIT_FAILURE,
2294 "set_rx_queue_stats_mapping_registers "
2295 "failed for port id=%d diag=%d\n",
2301 rxtx_port_config(struct rte_port *port)
2305 for (qid = 0; qid < nb_rxq; qid++) {
2306 port->rx_conf[qid] = port->dev_info.default_rxconf;
2308 /* Check if any Rx parameters have been passed */
2309 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2310 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2312 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2313 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2315 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2316 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2318 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2319 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2321 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2322 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2324 port->nb_rx_desc[qid] = nb_rxd;
2327 for (qid = 0; qid < nb_txq; qid++) {
2328 port->tx_conf[qid] = port->dev_info.default_txconf;
2330 /* Check if any Tx parameters have been passed */
2331 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2332 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2334 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2335 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2337 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2338 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2340 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2341 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2343 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2344 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2346 port->nb_tx_desc[qid] = nb_txd;
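/*
 * The RTE_PMD_PARAM_UNSET checks above let command-line options such
 * as --rxpt/--rxht/--rxwt, --txpt/--txht/--txwt, --rxfreet and
 * --txfreet override the driver defaults; e.g. "--txfreet=32".
 */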
2351 init_port_config(void)
2354 struct rte_port *port;
2356 RTE_ETH_FOREACH_DEV(pid) {
2358 port->dev_conf.fdir_conf = fdir_conf;
2359 rte_eth_dev_info_get(pid, &port->dev_info);
2361 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2362 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2363 rss_hf & port->dev_info.flow_type_rss_offloads;
2365 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2366 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2369 if (port->dcb_flag == 0) {
2370 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2371 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2373 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2376 rxtx_port_config(port);
2378 rte_eth_macaddr_get(pid, &port->eth_addr);
2380 map_port_queue_stats_mapping_registers(pid, port);
2381 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2382 rte_pmd_ixgbe_bypass_init(pid);
2385 if (lsc_interrupt &&
2386 (rte_eth_devices[pid].data->dev_flags &
2387 RTE_ETH_DEV_INTR_LSC))
2388 port->dev_conf.intr_conf.lsc = 1;
2389 if (rmv_interrupt &&
2390 (rte_eth_devices[pid].data->dev_flags &
2391 RTE_ETH_DEV_INTR_RMV))
2392 port->dev_conf.intr_conf.rmv = 1;
2394 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2395 /* Detect softnic port */
2396 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2397 port->softnic_enable = 1;
2398 memset(&port->softport, 0, sizeof(struct softnic_port));
2400 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2401 port->softport.tm_flag = 1;
2407 void set_port_slave_flag(portid_t slave_pid)
2409 struct rte_port *port;
2411 port = &ports[slave_pid];
2412 port->slave_flag = 1;
2415 void clear_port_slave_flag(portid_t slave_pid)
2417 struct rte_port *port;
2419 port = &ports[slave_pid];
2420 port->slave_flag = 0;
2423 uint8_t port_is_bonding_slave(portid_t slave_pid)
2425 struct rte_port *port;
2427 port = &ports[slave_pid];
2428 if ((rte_eth_devices[slave_pid].data->dev_flags &
2429 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2434 const uint16_t vlan_tags[] = {
2435 0, 1, 2, 3, 4, 5, 6, 7,
2436 8, 9, 10, 11, 12, 13, 14, 15,
2437 16, 17, 18, 19, 20, 21, 22, 23,
2438 24, 25, 26, 27, 28, 29, 30, 31
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
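/*
 * Worked example (derived from the code above, for illustration only): with
 * num_tcs == ETH_4_TCS in DCB_VT_ENABLED mode, nb_queue_pools is 32, so
 * pool_map[i] maps VLAN vlan_tags[i] (== i) to pool i, and the eight user
 * priorities are spread over the four traffic classes round-robin:
 *
 *	priority: 0 1 2 3 4 5 6 7
 *	dcb_tc:   0 1 2 3 0 1 2 3
 */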
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	port_conf.rxmode = rte_port->dev_conf.rxmode;
	port_conf.txmode = rte_port->dev_conf.txmode;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/* re-configure the device */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
	if (retval < 0)
		return retval;
	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
		       pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
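/*
 * Usage sketch (hypothetical helper, compiled out): this is roughly the
 * call made by the "port config <port> dcb ..." command path; the exact
 * command wiring is an assumption here. The port must be stopped before it
 * can be reconfigured for DCB, in this case with 4 traffic classes and
 * priority flow control enabled.
 */
#if 0
static void
example_enable_dcb(portid_t pid)
{
	if (init_port_dcb_config(pid, DCB_ENABLED, ETH_4_TCS, 1) != 0)
		printf("Enabling DCB on port %d failed\n", pid);
}
#endif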
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}
}

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
		       signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif
	nb_ports = (portid_t) rte_eth_dev_count_avail();
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");
	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_BSDAPP
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			    strerror(errno));
	}
	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				"using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (hot_plug) {
		/* enable hot plug monitoring */
		ret = rte_dev_event_monitor_start();
		if (ret) {
			rte_errno = EINVAL;
			return -1;
		}
		eth_dev_event_callback_register();
	}

	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");
	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());
#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init() returned error %d\n",
			       ret);
		printf("Latencystats running on lcore %d\n",
		       latencystats_lcore_id);
	}
#endif
	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				 "Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif
#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();
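			/*
			 * Example with illustrative numbers: at a 2.0 GHz
			 * timer frequency, --stats-period 5 gives
			 * timer_period = 5 * 2e9 = 1e10 cycles, i.e. the
			 * statistics below are printed roughly every 5 s.
			 */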
			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}
2814 printf("Press enter to exit\n");
2815 rc = read(0, &c, 1);