1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
66 uint16_t verbose_level = 0; /**< Silent by default. */
67 int testpmd_logtype; /**< Log type for testpmd logs */
69 /* use master core for command line ? */
70 uint8_t interactive = 0;
71 uint8_t auto_start = 0;
73 char cmdline_filename[PATH_MAX] = {0};
76 * NUMA support configuration.
77 * When set, the NUMA support attempts to dispatch the allocation of the
78 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
79 * probed ports among the CPU sockets 0 and 1.
80 * Otherwise, all memory is allocated from CPU socket 0.
82 uint8_t numa_support = 1; /**< numa enabled by default */
85 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
88 uint8_t socket_num = UMA_NO_CONFIG;
91 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
96 * Store specified sockets on which memory pool to be used by ports
99 uint8_t port_numa[RTE_MAX_ETHPORTS];
102 * Store specified sockets on which RX ring to be used by ports
105 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
108 * Store specified sockets on which TX ring to be used by ports
111 uint8_t txring_numa[RTE_MAX_ETHPORTS];
114 * Record the Ethernet address of peer target ports to which packets are
116 * Must be instantiated with the ethernet addresses of peer traffic generator
119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120 portid_t nb_peer_eth_addrs = 0;
123 * Probed Target Environment.
125 struct rte_port *ports; /**< For all probed ethernet ports. */
126 portid_t nb_ports; /**< Number of probed ethernet ports. */
127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
131 * Test Forwarding Configuration.
132 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
133 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
137 portid_t nb_cfg_ports; /**< Number of configured ports. */
138 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
143 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
144 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
147 * Forwarding engines.
149 struct fwd_engine * fwd_engines[] = {
158 #if defined RTE_LIBRTE_PMD_SOFTNIC
161 #ifdef RTE_LIBRTE_IEEE1588
162 &ieee1588_fwd_engine,
167 struct fwd_config cur_fwd_config;
168 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
169 uint32_t retry_enabled;
170 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
171 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
173 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
174 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
175 * specified on command-line. */
176 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
179 * In a container, it cannot terminate the process which is running with the 'stats-period'
180 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
185 * Configuration of packet segments used by the "txonly" processing engine.
187 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
188 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
189 TXONLY_DEF_PACKET_LEN,
191 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
193 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
194 /**< Split policy for packets to TX. */
196 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
197 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
199 /* current configuration is in DCB or not, 0 means it is not in DCB mode */
200 uint8_t dcb_config = 0;
202 /* Whether the dcb is in testing status */
203 uint8_t dcb_test = 0;
206 * Configurable number of RX/TX queues.
208 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
209 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
212 * Configurable number of RX/TX ring descriptors.
213 * Defaults are supplied by drivers via ethdev.
215 #define RTE_TEST_RX_DESC_DEFAULT 0
216 #define RTE_TEST_TX_DESC_DEFAULT 0
217 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
218 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
220 #define RTE_PMD_PARAM_UNSET -1
222 * Configurable values of RX and TX ring threshold registers.
225 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
234 * Configurable value of RX free threshold.
236 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
239 * Configurable value of RX drop enable.
241 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
244 * Configurable value of TX free threshold.
246 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
249 * Configurable value of TX RS bit threshold.
251 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
254 * Receive Side Scaling (RSS) configuration.
256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
259 * Port topology configuration
261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
264 * Avoid flushing all the RX streams before starting forwarding.
266 uint8_t no_flush_rx = 0; /* flush by default */
269 * Flow API isolated mode.
271 uint8_t flow_isolate_all;
274 * Avoid checking the link status when starting/stopping a port.
276 uint8_t no_link_check = 0; /* check by default */
279 * Enable link status change notification
281 uint8_t lsc_interrupt = 1; /* enabled by default */
284 * Enable device removal notification.
286 uint8_t rmv_interrupt = 1; /* enabled by default */
288 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
291 * Display or mask ether events
292 * Default to all events except VF_MBOX
294 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
295 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
296 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
297 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
298 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
299 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
300 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
302 * Decide if all memory are locked for performance.
307 * NIC bypass mode configuration options.
310 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
311 /* The NIC bypass watchdog timeout. */
312 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
316 #ifdef RTE_LIBRTE_LATENCY_STATS
319 * Set when latency stats is enabled in the commandline
321 uint8_t latencystats_enabled;
324 * Lcore ID to service latency statistics.
326 lcoreid_t latencystats_lcore_id = -1;
331 * Ethernet device configuration.
333 struct rte_eth_rxmode rx_mode = {
334 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
335 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
338 struct rte_eth_txmode tx_mode = {
339 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
342 struct rte_fdir_conf fdir_conf = {
343 .mode = RTE_FDIR_MODE_NONE,
344 .pballoc = RTE_FDIR_PBALLOC_64K,
345 .status = RTE_FDIR_REPORT_STATUS,
347 .vlan_tci_mask = 0xFFEF,
349 .src_ip = 0xFFFFFFFF,
350 .dst_ip = 0xFFFFFFFF,
353 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
354 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
356 .src_port_mask = 0xFFFF,
357 .dst_port_mask = 0xFFFF,
358 .mac_addr_byte_mask = 0xFF,
359 .tunnel_type_mask = 1,
360 .tunnel_id_mask = 0xFFFFFFFF,
365 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
367 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
368 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
370 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
371 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
373 uint16_t nb_tx_queue_stats_mappings = 0;
374 uint16_t nb_rx_queue_stats_mappings = 0;
377 * Display zero values by default for xstats
379 uint8_t xstats_hide_zero;
381 unsigned int num_sockets = 0;
382 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
384 #ifdef RTE_LIBRTE_BITRATE
385 /* Bitrate statistics */
386 struct rte_stats_bitrates *bitrate_data;
387 lcoreid_t bitrate_lcore_id;
388 uint8_t bitrate_enabled;
391 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
392 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
394 struct vxlan_encap_conf vxlan_encap_conf = {
397 .vni = "\x00\x00\x00",
399 .udp_dst = RTE_BE16(4789),
400 .ipv4_src = IPv4(127, 0, 0, 1),
401 .ipv4_dst = IPv4(255, 255, 255, 255),
402 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
403 "\x00\x00\x00\x00\x00\x00\x00\x01",
404 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
405 "\x00\x00\x00\x00\x00\x00\x11\x11",
407 .eth_src = "\x00\x00\x00\x00\x00\x00",
408 .eth_dst = "\xff\xff\xff\xff\xff\xff",
411 struct nvgre_encap_conf nvgre_encap_conf = {
414 .tni = "\x00\x00\x00",
415 .ipv4_src = IPv4(127, 0, 0, 1),
416 .ipv4_dst = IPv4(255, 255, 255, 255),
417 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
418 "\x00\x00\x00\x00\x00\x00\x00\x01",
419 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
420 "\x00\x00\x00\x00\x00\x00\x11\x11",
422 .eth_src = "\x00\x00\x00\x00\x00\x00",
423 .eth_dst = "\xff\xff\xff\xff\xff\xff",
426 /* Forward function declarations */
427 static void map_port_queue_stats_mapping_registers(portid_t pi,
428 struct rte_port *port);
429 static void check_all_ports_link_status(uint32_t port_mask);
430 static int eth_event_callback(portid_t port_id,
431 enum rte_eth_event_type type,
432 void *param, void *ret_param);
433 static void eth_dev_event_callback(char *device_name,
434 enum rte_dev_event_type type,
436 static int eth_dev_event_callback_register(void);
437 static int eth_dev_event_callback_unregister(void);
441 * Check if all the ports are started.
442 * If yes, return positive value. If not, return zero.
444 static int all_ports_started(void);
446 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
447 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
450 * Helper function to check if socket is already discovered.
451 * If yes, return positive value. If not, return zero.
/*
 * Return non-zero when @socket_id is not yet recorded in socket_ids[]
 * (i.e. a newly discovered socket); the loop scans the num_sockets
 * entries collected so far.
 */
454 new_socket_id(unsigned int socket_id)
458 	for (i = 0; i < num_sockets; i++) {
459 		if (socket_ids[i] == socket_id)
466 * Setup default configuration.
/*
 * Build the default forwarding lcore configuration: record each enabled
 * lcore's CPU socket in socket_ids[] and collect every enabled,
 * non-master lcore into fwd_lcores_cpuids[].
 */
469 set_default_fwd_lcores_config(void)
473 unsigned int sock_num;
/* Walk every possible lcore id. */
476 for (i = 0; i < RTE_MAX_LCORE; i++) {
477 sock_num = rte_lcore_to_socket_id(i);
/* First sighting of this socket: record it, bounded by RTE_MAX_NUMA_NODES. */
478 if (new_socket_id(sock_num)) {
479 if (num_sockets >= RTE_MAX_NUMA_NODES) {
480 rte_exit(EXIT_FAILURE,
481 "Total sockets greater than %u\n",
484 socket_ids[num_sockets++] = sock_num;
/* Only enabled lcores forward packets; the master lcore is excluded. */
486 if (!rte_lcore_is_enabled(i))
488 if (i == rte_get_master_lcore())
490 fwd_lcores_cpuids[nb_lc++] = i;
492 nb_lcores = (lcoreid_t) nb_lc;
493 nb_cfg_lcores = nb_lcores;
/*
 * Give every possible port a default peer MAC address: a locally
 * administered address whose last byte is the port index.
 */
498 set_def_peer_eth_addrs(void)
502 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
503 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
504 peer_eth_addrs[i].addr_bytes[5] = i;
/* Use every probed ethdev port, in probe order, as a forwarding port. */
509 set_default_fwd_ports_config(void)
514 RTE_ETH_FOREACH_DEV(pt_id)
515 fwd_ports_ids[i++] = pt_id;
517 nb_cfg_ports = nb_ports;
518 nb_fwd_ports = nb_ports;
/* Apply the three default configurations: lcores, peer MACs, then ports. */
522 set_def_fwd_config(void)
524 set_default_fwd_lcores_config();
525 set_def_peer_eth_addrs();
526 set_default_fwd_ports_config();
530 * Configuration initialisation done once at init time.
/*
 * Create a pool of @nb_mbuf mbufs of @mbuf_seg_size data bytes on the
 * given NUMA @socket_id. Two visible creation paths exist: an
 * anonymous-memory path (rte_mempool_create_empty + populate_anon) and
 * the standard rte_pktmbuf_pool_create() wrapper; exits on failure.
 */
533 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
534 unsigned int socket_id)
536 char pool_name[RTE_MEMPOOL_NAMESIZE];
537 struct rte_mempool *rte_mp = NULL;
540 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
/* Pool name encodes the socket so per-socket pools can be looked up later. */
541 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
544 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
545 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
/* Anonymous-mapped memory path: build an empty pool, then populate it. */
548 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
549 mb_size, (unsigned) mb_mempool_cache,
550 sizeof(struct rte_pktmbuf_pool_private),
/* populate_anon() returns the number of objects added; 0 means failure. */
555 if (rte_mempool_populate_anon(rte_mp) == 0) {
556 rte_mempool_free(rte_mp);
560 rte_pktmbuf_pool_init(rte_mp, NULL);
561 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
563 /* wrapper to rte_mempool_create() */
564 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
565 rte_mbuf_best_mempool_ops());
566 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
567 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
/* A missing pool is fatal for testpmd; dump the pool when verbose. */
571 if (rte_mp == NULL) {
572 rte_exit(EXIT_FAILURE,
573 "Creation of mbuf pool for socket %u failed: %s\n",
574 socket_id, rte_strerror(rte_errno));
575 } else if (verbose_level > 0) {
576 rte_mempool_dump(stdout, rte_mp);
581 * Check given socket id is valid or not with NUMA mode,
582 * if valid, return 0, else return -1
/*
 * Check that @socket_id was discovered during lcore enumeration.
 * In NUMA mode an unknown socket triggers a one-time warning
 * (latched by the static warning_once flag).
 */
585 check_socket_id(const unsigned int socket_id)
587 static int warning_once = 0;
589 if (new_socket_id(socket_id)) {
590 if (!warning_once && numa_support)
591 printf("Warning: NUMA should be configured manually by"
592 " using --port-numa-config and"
593 " --ring-numa-config parameters along with"
602 * Get the allowed maximum number of RX queues.
603 * *pid return the port id which has minimal value of
604 * max_rx_queues in all ports.
/*
 * Return the smallest max_rx_queues across all probed ports.
 * *pid presumably reports the port holding that minimum — the
 * assignment is not visible in this excerpt; TODO confirm.
 */
607 get_allowed_max_nb_rxq(portid_t *pid)
609 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
611 struct rte_eth_dev_info dev_info;
613 RTE_ETH_FOREACH_DEV(pi) {
614 rte_eth_dev_info_get(pi, &dev_info);
615 if (dev_info.max_rx_queues < allowed_max_rxq) {
616 allowed_max_rxq = dev_info.max_rx_queues;
620 return allowed_max_rxq;
624 * Check input rxq is valid or not.
625 * If input rxq is not greater than any of maximum number
626 * of RX queues of all ports, it is valid.
627 * if valid, return 0, else return -1
/* Reject an rxq count larger than the smallest per-port max_rx_queues. */
630 check_nb_rxq(queueid_t rxq)
632 queueid_t allowed_max_rxq;
635 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
636 if (rxq > allowed_max_rxq) {
637 printf("Fail: input rxq (%u) can't be greater "
638 "than max_rx_queues (%u) of port %u\n",
648 * Get the allowed maximum number of TX queues.
649 * *pid return the port id which has minimal value of
650 * max_tx_queues in all ports.
/*
 * Return the smallest max_tx_queues across all probed ports.
 * *pid presumably reports the port holding that minimum — the
 * assignment is not visible in this excerpt; TODO confirm.
 */
653 get_allowed_max_nb_txq(portid_t *pid)
655 queueid_t allowed_max_txq = MAX_QUEUE_ID;
657 struct rte_eth_dev_info dev_info;
659 RTE_ETH_FOREACH_DEV(pi) {
660 rte_eth_dev_info_get(pi, &dev_info);
661 if (dev_info.max_tx_queues < allowed_max_txq) {
662 allowed_max_txq = dev_info.max_tx_queues;
666 return allowed_max_txq;
670 * Check input txq is valid or not.
671 * If input txq is not greater than any of maximum number
672 * of TX queues of all ports, it is valid.
673 * if valid, return 0, else return -1
/* Reject a txq count larger than the smallest per-port max_tx_queues. */
676 check_nb_txq(queueid_t txq)
678 queueid_t allowed_max_txq;
681 allowed_max_txq = get_allowed_max_nb_txq(&pid);
682 if (txq > allowed_max_txq) {
683 printf("Fail: input txq (%u) can't be greater "
684 "than max_tx_queues (%u) of port %u\n",
697 struct rte_port *port;
698 struct rte_mempool *mbp;
699 unsigned int nb_mbuf_per_pool;
701 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
702 struct rte_gro_param gro_param;
706 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
709 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
710 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
711 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
714 /* Configuration of logical cores. */
715 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
716 sizeof(struct fwd_lcore *) * nb_lcores,
717 RTE_CACHE_LINE_SIZE);
718 if (fwd_lcores == NULL) {
719 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
720 "failed\n", nb_lcores);
722 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
723 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
724 sizeof(struct fwd_lcore),
725 RTE_CACHE_LINE_SIZE);
726 if (fwd_lcores[lc_id] == NULL) {
727 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
730 fwd_lcores[lc_id]->cpuid_idx = lc_id;
733 RTE_ETH_FOREACH_DEV(pid) {
735 /* Apply default TxRx configuration for all ports */
736 port->dev_conf.txmode = tx_mode;
737 port->dev_conf.rxmode = rx_mode;
738 rte_eth_dev_info_get(pid, &port->dev_info);
740 if (!(port->dev_info.rx_offload_capa &
741 DEV_RX_OFFLOAD_CRC_STRIP))
742 port->dev_conf.rxmode.offloads &=
743 ~DEV_RX_OFFLOAD_CRC_STRIP;
744 if (!(port->dev_info.tx_offload_capa &
745 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
746 port->dev_conf.txmode.offloads &=
747 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
749 if (port_numa[pid] != NUMA_NO_CONFIG)
750 port_per_socket[port_numa[pid]]++;
752 uint32_t socket_id = rte_eth_dev_socket_id(pid);
754 /* if socket_id is invalid, set to 0 */
755 if (check_socket_id(socket_id) < 0)
757 port_per_socket[socket_id]++;
761 /* Apply Rx offloads configuration */
762 for (k = 0; k < port->dev_info.max_rx_queues; k++)
763 port->rx_conf[k].offloads =
764 port->dev_conf.rxmode.offloads;
765 /* Apply Tx offloads configuration */
766 for (k = 0; k < port->dev_info.max_tx_queues; k++)
767 port->tx_conf[k].offloads =
768 port->dev_conf.txmode.offloads;
770 /* set flag to initialize port/queue */
771 port->need_reconfig = 1;
772 port->need_reconfig_queues = 1;
776 * Create pools of mbuf.
777 * If NUMA support is disabled, create a single pool of mbuf in
778 * socket 0 memory by default.
779 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
781 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
782 * nb_txd can be configured at run time.
784 if (param_total_num_mbufs)
785 nb_mbuf_per_pool = param_total_num_mbufs;
787 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
788 (nb_lcores * mb_mempool_cache) +
789 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
790 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
796 for (i = 0; i < num_sockets; i++)
797 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
800 if (socket_num == UMA_NO_CONFIG)
801 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
803 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
809 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
810 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
812 * Records which Mbuf pool to use by each logical core, if needed.
814 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
815 mbp = mbuf_pool_find(
816 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
819 mbp = mbuf_pool_find(0);
820 fwd_lcores[lc_id]->mbp = mbp;
821 /* initialize GSO context */
822 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
823 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
824 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
825 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
827 fwd_lcores[lc_id]->gso_ctx.flag = 0;
830 /* Configuration of packet forwarding streams. */
831 if (init_fwd_streams() < 0)
832 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
836 /* create a gro context for each lcore */
837 gro_param.gro_types = RTE_GRO_TCP_IPV4;
838 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
839 gro_param.max_item_per_flow = MAX_PKT_BURST;
840 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
841 gro_param.socket_id = rte_lcore_to_socket_id(
842 fwd_lcores_cpuids[lc_id]);
843 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
844 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
845 rte_exit(EXIT_FAILURE,
846 "rte_gro_ctx_create() failed\n");
850 #if defined RTE_LIBRTE_PMD_SOFTNIC
851 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
852 RTE_ETH_FOREACH_DEV(pid) {
854 const char *driver = port->dev_info.driver_name;
856 if (strcmp(driver, "net_softnic") == 0)
857 port->softport.fwd_lcore_arg = fwd_lcores;
/*
 * Re-read device info for @new_port_id and mark the port and its
 * queues for reconfiguration on the given @socket_id; the actual
 * reconfiguration happens later when the port is (re)started.
 */
866 reconfig(portid_t new_port_id, unsigned socket_id)
868 struct rte_port *port;
870 /* Reconfiguration of Ethernet ports. */
871 port = &ports[new_port_id];
872 rte_eth_dev_info_get(new_port_id, &port->dev_info);
874 /* set flag to initialize port/queue */
875 port->need_reconfig = 1;
876 port->need_reconfig_queues = 1;
877 port->socket_id = socket_id;
/*
 * (Re)allocate the fwd_streams[] array. Validates nb_rxq/nb_txq against
 * each port's capabilities, assigns each port a NUMA socket id, then
 * sizes the stream array as nb_ports * max(nb_rxq, nb_txq), freeing any
 * previous allocation first.
 */
884 init_fwd_streams(void)
887 struct rte_port *port;
888 streamid_t sm_id, nb_fwd_streams_new;
891 /* set socket id according to numa or not */
892 RTE_ETH_FOREACH_DEV(pid) {
/* Requested queue counts must fit within each device's limits. */
894 if (nb_rxq > port->dev_info.max_rx_queues) {
895 printf("Fail: nb_rxq(%d) is greater than "
896 "max_rx_queues(%d)\n", nb_rxq,
897 port->dev_info.max_rx_queues);
900 if (nb_txq > port->dev_info.max_tx_queues) {
901 printf("Fail: nb_txq(%d) is greater than "
902 "max_tx_queues(%d)\n", nb_txq,
903 port->dev_info.max_tx_queues);
/* Explicit --port-numa-config wins; otherwise use the device's socket. */
907 if (port_numa[pid] != NUMA_NO_CONFIG)
908 port->socket_id = port_numa[pid];
910 port->socket_id = rte_eth_dev_socket_id(pid);
912 /* if socket_id is invalid, set to 0 */
913 if (check_socket_id(port->socket_id) < 0)
918 if (socket_num == UMA_NO_CONFIG)
921 port->socket_id = socket_num;
/* One stream per queue per port; zero queues means nothing to allocate. */
925 q = RTE_MAX(nb_rxq, nb_txq);
927 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
930 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
931 if (nb_fwd_streams_new == nb_fwd_streams)
/* Release the previous stream array before resizing. */
934 if (fwd_streams != NULL) {
935 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
936 if (fwd_streams[sm_id] == NULL)
938 rte_free(fwd_streams[sm_id]);
939 fwd_streams[sm_id] = NULL;
941 rte_free(fwd_streams);
946 nb_fwd_streams = nb_fwd_streams_new;
947 if (nb_fwd_streams) {
948 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
949 sizeof(struct fwd_stream *) * nb_fwd_streams,
950 RTE_CACHE_LINE_SIZE);
951 if (fwd_streams == NULL)
952 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
953 " (struct fwd_stream *)) failed\n",
956 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
957 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
958 " struct fwd_stream", sizeof(struct fwd_stream),
959 RTE_CACHE_LINE_SIZE);
960 if (fwd_streams[sm_id] == NULL)
961 rte_exit(EXIT_FAILURE, "rte_zmalloc"
962 "(struct fwd_stream) failed\n");
969 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a burst-size histogram summary for @rx_tx ("RX" or "TX"):
 * the total number of bursts and the percentages of the two most
 * frequent burst sizes, lumping everything else into "others".
 */
971 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
973 unsigned int total_burst;
974 unsigned int nb_burst;
975 unsigned int burst_stats[3];
976 uint16_t pktnb_stats[3];
978 int burst_percent[3];
981 * First compute the total number of packet bursts and the
982 * two highest numbers of bursts of the same number of packets.
985 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
986 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
987 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
988 nb_burst = pbs->pkt_burst_spread[nb_pkt];
991 total_burst += nb_burst;
/* Keep a running top-2: demote the current best when a new max appears. */
992 if (nb_burst > burst_stats[0]) {
993 burst_stats[1] = burst_stats[0];
994 pktnb_stats[1] = pktnb_stats[0];
995 burst_stats[0] = nb_burst;
996 pktnb_stats[0] = nb_pkt;
997 } else if (nb_burst > burst_stats[1]) {
998 burst_stats[1] = nb_burst;
999 pktnb_stats[1] = nb_pkt;
/* Nothing recorded: nothing to print. */
1002 if (total_burst == 0)
1004 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1005 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1006 burst_percent[0], (int) pktnb_stats[0]);
1007 if (burst_stats[0] == total_burst) {
/* Top two sizes account for everything: print the complement directly. */
1011 if (burst_stats[0] + burst_stats[1] == total_burst) {
1012 printf(" + %d%% of %d pkts]\n",
1013 100 - burst_percent[0], pktnb_stats[1]);
1016 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1017 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1018 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1019 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1022 printf(" + %d%% of %d pkts + %d%% of others]\n",
1023 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
/*
 * Print per-port forwarding statistics. Two layouts exist: a wider one
 * when no RX/TX queue-stats mapping is enabled, and a narrower one plus
 * per-stats-register queue counters when a mapping is enabled.
 */
1028 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
1030 struct rte_port *port;
1033 static const char *fwd_stats_border = "----------------------";
1035 port = &ports[port_id];
1036 printf("\n %s Forward statistics for port %-2d %s\n",
1037 fwd_stats_border, port_id, fwd_stats_border);
/* No queue-stats mapping on either direction: use the wide layout. */
1039 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
1040 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1042 stats->ipackets, stats->imissed,
1043 (uint64_t) (stats->ipackets + stats->imissed),
1045 if (cur_fwd_eng == &csum_fwd_engine)
1046 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
1047 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1048 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1049 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
1050 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
1053 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1055 stats->opackets, port->tx_dropped,
1056 (uint64_t) (stats->opackets + port->tx_dropped));
/* Queue-stats mapping enabled: narrower layout. */
1059 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
1061 stats->ipackets, stats->imissed,
1062 (uint64_t) (stats->ipackets + stats->imissed));
1064 if (cur_fwd_eng == &csum_fwd_engine)
1065 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
1066 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1067 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1068 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
1069 printf(" RX-nombufs: %14"PRIu64"\n",
1073 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
1075 stats->opackets, port->tx_dropped,
1076 (uint64_t) (stats->opackets + port->tx_dropped));
1079 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1080 if (port->rx_stream)
1081 pkt_burst_stats_display("RX",
1082 &port->rx_stream->rx_burst_stats);
1083 if (port->tx_stream)
1084 pkt_burst_stats_display("TX",
1085 &port->tx_stream->tx_burst_stats);
/* Per-stats-register queue counters, one line per register. */
1088 if (port->rx_queue_stats_mapping_enabled) {
1090 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1091 printf(" Stats reg %2d RX-packets:%14"PRIu64
1092 " RX-errors:%14"PRIu64
1093 " RX-bytes:%14"PRIu64"\n",
1094 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1098 if (port->tx_queue_stats_mapping_enabled) {
1099 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1100 printf(" Stats reg %2d TX-packets:%14"PRIu64
1101 " TX-bytes:%14"PRIu64"\n",
1102 i, stats->q_opackets[i], stats->q_obytes[i]);
1106 printf(" %s--------------------------------%s\n",
1107 fwd_stats_border, fwd_stats_border);
/*
 * Print per-stream forwarding statistics for @stream_id; streams with
 * no activity (no RX, no TX, no drops) are skipped entirely.
 */
1111 fwd_stream_stats_display(streamid_t stream_id)
1113 struct fwd_stream *fs;
1114 static const char *fwd_top_stats_border = "-------";
1116 fs = fwd_streams[stream_id];
1117 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1118 (fs->fwd_dropped == 0))
1120 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1121 "TX Port=%2d/Queue=%2d %s\n",
1122 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1123 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1124 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1125 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1127 /* if checksum mode */
1128 if (cur_fwd_eng == &csum_fwd_engine) {
1129 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
1130 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1133 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1134 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1135 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain stale packets from every forwarding RX queue before a new run:
 * two passes over all ports/queues, freeing whatever rte_eth_rx_burst()
 * returns, each queue bounded by a one-second timer.
 */
1140 flush_fwd_rx_queues(void)
1142 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1149 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1150 uint64_t timer_period;
1152 /* convert to number of cycles */
1153 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1155 for (j = 0; j < 2; j++) {
1156 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1157 for (rxq = 0; rxq < nb_rxq; rxq++) {
1158 port_id = fwd_ports_ids[rxp];
1160 * testpmd can get stuck in the below do while loop
1161 * if rte_eth_rx_burst() always returns nonzero
1162 * packets. So timer is added to exit this loop
1163 * after 1sec timer expiry.
1165 prev_tsc = rte_rdtsc();
1167 nb_rx = rte_eth_rx_burst(port_id, rxq,
1168 pkts_burst, MAX_PKT_BURST);
1169 for (i = 0; i < nb_rx; i++)
1170 rte_pktmbuf_free(pkts_burst[i]);
1172 cur_tsc = rte_rdtsc();
1173 diff_tsc = cur_tsc - prev_tsc;
1174 timer_tsc += diff_tsc;
1175 } while ((nb_rx > 0) &&
1176 (timer_tsc < timer_period));
1180 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main forwarding loop for one lcore: repeatedly apply @pkt_fwd to
 * every stream assigned to @fc until fc->stopped is set. Optionally
 * performs periodic bitrate calculation and latency-stats updates on
 * the designated lcores (compile-time gated).
 */
1185 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1187 struct fwd_stream **fsm;
1190 #ifdef RTE_LIBRTE_BITRATE
1191 uint64_t tics_per_1sec;
1192 uint64_t tics_datum;
1193 uint64_t tics_current;
1196 tics_datum = rte_rdtsc();
1197 tics_per_1sec = rte_get_timer_hz();
/* This lcore's contiguous slice of the global stream array. */
1199 fsm = &fwd_streams[fc->stream_idx];
1200 nb_fs = fc->stream_nb;
1202 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1203 (*pkt_fwd)(fsm[sm_id]);
1204 #ifdef RTE_LIBRTE_BITRATE
/* Only the designated bitrate lcore recalculates, at most once per second. */
1205 if (bitrate_enabled != 0 &&
1206 bitrate_lcore_id == rte_lcore_id()) {
1207 tics_current = rte_rdtsc();
1208 if (tics_current - tics_datum >= tics_per_1sec) {
1209 /* Periodic bitrate calculation */
1210 RTE_ETH_FOREACH_DEV(idx_port)
1211 rte_stats_bitrate_calc(bitrate_data,
1213 tics_datum = tics_current;
1217 #ifdef RTE_LIBRTE_LATENCY_STATS
1218 if (latencystats_enabled != 0 &&
1219 latencystats_lcore_id == rte_lcore_id())
1220 rte_latencystats_update();
1223 } while (! fc->stopped);
/* lcore entry point: run the current forwarding engine on this lcore. */
1227 start_pkt_forward_on_core(void *fwd_arg)
1229 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1230 cur_fwd_config.fwd_eng->packet_fwd);
1235 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1236 * Used to start communication flows in network loopback test configurations.
/*
 * lcore entry point used by network-loopback setups: send a single
 * TXONLY burst by running the engine on a copy of the lcore context
 * whose stopped flag is pre-set, so the forwarding loop exits after
 * one iteration.
 */
1239 run_one_txonly_burst_on_core(void *fwd_arg)
1241 struct fwd_lcore *fwd_lc;
1242 struct fwd_lcore tmp_lcore;
1244 fwd_lc = (struct fwd_lcore *) fwd_arg;
/* Work on a stopped copy so exactly one burst is transmitted. */
1245 tmp_lcore = *fwd_lc;
1246 tmp_lcore.stopped = 1;
1247 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1252 * Launch packet forwarding:
1253 * - Setup per-port forwarding context.
1254 * - launch logical cores with their forwarding configuration.
/*
 * Launch @pkt_fwd_on_lcore on every configured forwarding lcore,
 * after invoking the engine's optional per-port begin callback.
 * In interactive mode the current (command-line) lcore is skipped.
 */
1257 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1259 port_fwd_begin_t port_fwd_begin;
1264 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1265 if (port_fwd_begin != NULL) {
1266 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1267 (*port_fwd_begin)(fwd_ports_ids[i]);
1269 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1270 lc_id = fwd_lcores_cpuids[i];
1271 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1272 fwd_lcores[i]->stopped = 0;
1273 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1274 fwd_lcores[i], lc_id);
1276 printf("launch lcore %u failed - diag=%d\n",
1283 * Update the forward ports list.
/*
 * Compact fwd_ports_ids[] by dropping invalid port ids, optionally
 * appending @new_pid (pass >= RTE_MAX_ETHPORTS to skip the append),
 * and refresh the forwarding/configured port counts.
 */
1286 update_fwd_ports(portid_t new_pid)
1289 unsigned int new_nb_fwd_ports = 0;
/* Keep only still-valid port ids, compacting in place. */
1292 for (i = 0; i < nb_fwd_ports; ++i) {
1293 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1296 fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1300 if (new_pid < RTE_MAX_ETHPORTS)
1301 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1303 nb_fwd_ports = new_nb_fwd_ports;
1304 nb_cfg_ports = new_nb_fwd_ports;
1308 * Launch packet forwarding configuration.
/*
 * Start packet forwarding with the current engine and configuration.
 * with_tx_first: number of preliminary TXONLY bursts to send (used to
 * kick-start loopback flows) before launching the real forwarding loop.
 * Validates queue counts against the engine, checks all ports are
 * started and forwarding is not already running, resets per-port and
 * per-stream statistics, then launches the lcores.
 */
1311 start_packet_forwarding(int with_tx_first)
1313 port_fwd_begin_t port_fwd_begin;
1314 port_fwd_end_t port_fwd_end;
1315 struct rte_port *port;
/* Engine/queue sanity checks: rxonly needs RX queues, txonly needs TX
 * queues, every other engine needs both. */
1320 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1321 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1323 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1324 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1326 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1327 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1328 (!nb_rxq || !nb_txq))
1329 rte_exit(EXIT_FAILURE,
1330 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1331 cur_fwd_eng->fwd_mode_name);
1333 if (all_ports_started() == 0) {
1334 printf("Not all ports were started\n");
1337 if (test_done == 0) {
1338 printf("Packet forwarding already started\n");
/* DCB-specific checks (reached only in DCB mode — surrounding guard is
 * elided in this view): every forwarding port must be DCB-configured
 * and more than one forwarding core is required. */
1344 for (i = 0; i < nb_fwd_ports; i++) {
1345 pt_id = fwd_ports_ids[i];
1346 port = &ports[pt_id];
1347 if (!port->dcb_flag) {
1348 printf("In DCB mode, all forwarding ports must "
1349 "be configured in this mode.\n");
1353 if (nb_fwd_lcores == 1) {
1354 printf("In DCB mode,the nb forwarding cores "
1355 "should be larger than 1.\n");
/* Drain stale packets from RX queues so stats start clean. */
1364 flush_fwd_rx_queues();
1366 pkt_fwd_config_display(&cur_fwd_config);
1367 rxtx_config_display();
/* Snapshot current HW stats per port (used as a baseline when stats are
 * reported at stop time) and zero the SW drop counter. */
1369 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1370 pt_id = fwd_ports_ids[i];
1371 port = &ports[pt_id];
1372 rte_eth_stats_get(pt_id, &port->stats);
1373 port->tx_dropped = 0;
1375 map_port_queue_stats_mapping_registers(pt_id, port);
/* Reset all per-stream software counters. */
1377 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1378 fwd_streams[sm_id]->rx_packets = 0;
1379 fwd_streams[sm_id]->tx_packets = 0;
1380 fwd_streams[sm_id]->fwd_dropped = 0;
1381 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1382 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1384 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1385 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1386 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1387 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1388 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1390 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1391 fwd_streams[sm_id]->core_cycles = 0;
/* Optionally run the TXONLY engine for with_tx_first single bursts
 * (waiting for all lcores between bursts) before the real launch. */
1394 if (with_tx_first) {
1395 port_fwd_begin = tx_only_engine.port_fwd_begin;
1396 if (port_fwd_begin != NULL) {
1397 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1398 (*port_fwd_begin)(fwd_ports_ids[i]);
1400 while (with_tx_first--) {
1401 launch_packet_forwarding(
1402 run_one_txonly_burst_on_core);
1403 rte_eal_mp_wait_lcore();
1405 port_fwd_end = tx_only_engine.port_fwd_end;
1406 if (port_fwd_end != NULL) {
1407 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1408 (*port_fwd_end)(fwd_ports_ids[i]);
1411 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop packet forwarding: signal every forwarding lcore to stop, wait
 * for them, run the engine's optional per-port end hook, aggregate
 * per-stream counters into per-port counters, compute delta statistics
 * against the baseline snapshot taken in start_packet_forwarding(), and
 * print per-port plus accumulated statistics.
 */
1415 stop_packet_forwarding(void)
1417 struct rte_eth_stats stats;
1418 struct rte_port *port;
1419 port_fwd_end_t port_fwd_end;
1424 uint64_t total_recv;
1425 uint64_t total_xmit;
1426 uint64_t total_rx_dropped;
1427 uint64_t total_tx_dropped;
1428 uint64_t total_rx_nombuf;
1429 uint64_t tx_dropped;
1430 uint64_t rx_bad_ip_csum;
1431 uint64_t rx_bad_l4_csum;
1432 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1433 uint64_t fwd_cycles;
1436 static const char *acc_stats_border = "+++++++++++++++";
1439 printf("Packet forwarding not started\n");
/* Raise the per-lcore stop flags, then block until all lcores return. */
1442 printf("Telling cores to stop...");
1443 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1444 fwd_lcores[lc_id]->stopped = 1;
1445 printf("\nWaiting for lcores to finish...\n");
1446 rte_eal_mp_wait_lcore();
1447 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1448 if (port_fwd_end != NULL) {
1449 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1450 pt_id = fwd_ports_ids[i];
1451 (*port_fwd_end)(pt_id);
1454 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Fold each stream's counters into its RX/TX ports; per-stream stats
 * are displayed only when there is more than one stream per port. */
1457 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1458 if (cur_fwd_config.nb_fwd_streams >
1459 cur_fwd_config.nb_fwd_ports) {
1460 fwd_stream_stats_display(sm_id);
1461 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1462 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1464 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1466 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1469 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1470 tx_dropped = (uint64_t) (tx_dropped +
1471 fwd_streams[sm_id]->fwd_dropped);
1472 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1475 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1476 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1477 fwd_streams[sm_id]->rx_bad_ip_csum);
1478 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1482 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1483 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1484 fwd_streams[sm_id]->rx_bad_l4_csum);
1485 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1488 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1489 fwd_cycles = (uint64_t) (fwd_cycles +
1490 fwd_streams[sm_id]->core_cycles);
1495 total_rx_dropped = 0;
1496 total_tx_dropped = 0;
1497 total_rx_nombuf = 0;
/* For each port: read HW stats, subtract the start-time baseline so the
 * displayed numbers cover only this forwarding session, and clear the
 * baseline for next time. */
1498 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1499 pt_id = fwd_ports_ids[i];
1501 port = &ports[pt_id];
1502 rte_eth_stats_get(pt_id, &stats);
1503 stats.ipackets -= port->stats.ipackets;
1504 port->stats.ipackets = 0;
1505 stats.opackets -= port->stats.opackets;
1506 port->stats.opackets = 0;
1507 stats.ibytes -= port->stats.ibytes;
1508 port->stats.ibytes = 0;
1509 stats.obytes -= port->stats.obytes;
1510 port->stats.obytes = 0;
1511 stats.imissed -= port->stats.imissed;
1512 port->stats.imissed = 0;
1513 stats.oerrors -= port->stats.oerrors;
1514 port->stats.oerrors = 0;
1515 stats.rx_nombuf -= port->stats.rx_nombuf;
1516 port->stats.rx_nombuf = 0;
1518 total_recv += stats.ipackets;
1519 total_xmit += stats.opackets;
1520 total_rx_dropped += stats.imissed;
1521 total_tx_dropped += port->tx_dropped;
1522 total_rx_nombuf += stats.rx_nombuf;
1524 fwd_port_stats_display(pt_id, &stats);
1527 printf("\n %s Accumulated forward statistics for all ports"
1529 acc_stats_border, acc_stats_border);
1530 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1532 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1534 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1535 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1536 if (total_rx_nombuf > 0)
1537 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1538 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1540 acc_stats_border, acc_stats_border);
1541 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* NOTE(review): division by total_recv — guard against total_recv == 0
 * appears to be on an elided line; verify in full source. */
1543 printf("\n CPU cycles/packet=%u (total cycles="
1544 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1545 (unsigned int)(fwd_cycles / total_recv),
1546 fwd_cycles, total_recv);
1548 printf("\nDone.\n");
1553 dev_set_link_up(portid_t pid)
1555 if (rte_eth_dev_set_link_up(pid) < 0)
1556 printf("\nSet link up fail.\n");
1560 dev_set_link_down(portid_t pid)
1562 if (rte_eth_dev_set_link_down(pid) < 0)
1563 printf("\nSet link down fail.\n");
/*
 * Return non-zero when every probed port (excluding bonding slaves) is
 * in RTE_PORT_STARTED state, zero otherwise.
 */
1567 all_ports_started(void)
1570 struct rte_port *port;
1572 RTE_ETH_FOREACH_DEV(pi) {
1574 /* Check if there is a port which is not started */
/* Bonding slave ports (slave_flag set) are managed by their bonded
 * master and are deliberately ignored here. */
1575 if ((port->port_status != RTE_PORT_STARTED) &&
1576 (port->slave_flag == 0))
1580 /* No port is not started */
/*
 * Return whether port_id is considered stopped: true when its status is
 * RTE_PORT_STOPPED, or when it is a bonding slave (slave ports are
 * controlled by the bonded master and treated as stopped here).
 */
1585 port_is_stopped(portid_t port_id)
1587 struct rte_port *port = &ports[port_id];
1589 if ((port->port_status != RTE_PORT_STOPPED) &&
1590 (port->slave_flag == 0))
/*
 * Return non-zero when every probed port reports stopped via
 * port_is_stopped(), zero as soon as one running port is found.
 */
1596 all_ports_stopped(void)
1600 RTE_ETH_FOREACH_DEV(pi) {
1601 if (!port_is_stopped(pi))
/*
 * Return whether port_id is valid and currently in RTE_PORT_STARTED
 * state (an invalid id is reported via ENABLED_WARN and treated as not
 * started).
 */
1609 port_is_started(portid_t port_id)
1611 if (port_id_is_invalid(port_id, ENABLED_WARN))
1614 if (ports[port_id].port_status != RTE_PORT_STARTED)
/*
 * Return whether port_id is valid and currently in RTE_PORT_CLOSED
 * state (an invalid id is reported via ENABLED_WARN and treated as not
 * closed).
 */
1621 port_is_closed(portid_t port_id)
1623 if (port_id_is_invalid(port_id, ENABLED_WARN))
1626 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * Start one port, or all ports when pid == RTE_PORT_ALL. For each
 * selected stopped port: (re)configure the device if flagged, (re)set
 * up its TX and RX queues if flagged, start it, print its MAC address,
 * and finally register ethdev event callbacks and check link status.
 * Port state transitions are done with atomic compare-and-set on
 * port_status (STOPPED -> HANDLING -> STARTED, rolling back to STOPPED
 * on failure) to guard against concurrent state changes.
 */
1633 start_port(portid_t pid)
1635 int diag, need_check_link_status = -1;
1638 struct rte_port *port;
1639 struct ether_addr mac_addr;
1640 enum rte_eth_event_type event_type;
1642 if (port_id_is_invalid(pid, ENABLED_WARN))
1647 RTE_ETH_FOREACH_DEV(pi) {
/* Skip ports not selected by pid (RTE_PORT_ALL selects everything). */
1648 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1651 need_check_link_status = 0;
1653 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1654 RTE_PORT_HANDLING) == 0) {
1655 printf("Port %d is now not stopped\n", pi);
/* Device-level reconfiguration, requested e.g. after a reset or a
 * configuration change from the command line. */
1659 if (port->need_reconfig > 0) {
1660 port->need_reconfig = 0;
1662 if (flow_isolate_all) {
1663 int ret = port_flow_isolate(pi, 1);
1665 printf("Failed to apply isolated"
1666 " mode on port %d\n", pi);
1671 printf("Configuring Port %d (socket %u)\n", pi,
1673 /* configure port */
1674 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1677 if (rte_atomic16_cmpset(&(port->port_status),
1678 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1679 printf("Port %d can not be set back "
1680 "to stopped\n", pi);
1681 printf("Fail to configure port %d\n", pi);
1682 /* try to reconfigure port next time */
1683 port->need_reconfig = 1;
/* Queue-level reconfiguration: set up every TX queue, then every RX
 * queue. With NUMA support, the ring socket may be overridden per port
 * via txring_numa[]/rxring_numa[]; RX queues additionally need a mbuf
 * pool on the chosen socket. */
1687 if (port->need_reconfig_queues > 0) {
1688 port->need_reconfig_queues = 0;
1689 /* setup tx queues */
1690 for (qi = 0; qi < nb_txq; qi++) {
1691 if ((numa_support) &&
1692 (txring_numa[pi] != NUMA_NO_CONFIG))
1693 diag = rte_eth_tx_queue_setup(pi, qi,
1694 port->nb_tx_desc[qi],
1696 &(port->tx_conf[qi]));
1698 diag = rte_eth_tx_queue_setup(pi, qi,
1699 port->nb_tx_desc[qi],
1701 &(port->tx_conf[qi]));
1706 /* Fail to setup tx queue, return */
1707 if (rte_atomic16_cmpset(&(port->port_status),
1709 RTE_PORT_STOPPED) == 0)
1710 printf("Port %d can not be set back "
1711 "to stopped\n", pi);
1712 printf("Fail to configure port %d tx queues\n",
1714 /* try to reconfigure queues next time */
1715 port->need_reconfig_queues = 1;
1718 for (qi = 0; qi < nb_rxq; qi++) {
1719 /* setup rx queues */
1720 if ((numa_support) &&
1721 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1722 struct rte_mempool * mp =
1723 mbuf_pool_find(rxring_numa[pi]);
1725 printf("Failed to setup RX queue:"
1726 "No mempool allocation"
1727 " on the socket %d\n",
1732 diag = rte_eth_rx_queue_setup(pi, qi,
1733 port->nb_rx_desc[qi],
1735 &(port->rx_conf[qi]),
1738 struct rte_mempool *mp =
1739 mbuf_pool_find(port->socket_id);
1741 printf("Failed to setup RX queue:"
1742 "No mempool allocation"
1743 " on the socket %d\n",
1747 diag = rte_eth_rx_queue_setup(pi, qi,
1748 port->nb_rx_desc[qi],
1750 &(port->rx_conf[qi]),
1756 /* Fail to setup rx queue, return */
1757 if (rte_atomic16_cmpset(&(port->port_status),
1759 RTE_PORT_STOPPED) == 0)
1760 printf("Port %d can not be set back "
1761 "to stopped\n", pi);
1762 printf("Fail to configure port %d rx queues\n",
1764 /* try to reconfigure queues next time */
1765 port->need_reconfig_queues = 1;
/* Actually start the device; on failure roll the state back. */
1771 if (rte_eth_dev_start(pi) < 0) {
1772 printf("Fail to start port %d\n", pi);
1774 /* Fail to setup rx queue, return */
1775 if (rte_atomic16_cmpset(&(port->port_status),
1776 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1777 printf("Port %d can not be set back to "
1782 if (rte_atomic16_cmpset(&(port->port_status),
1783 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1784 printf("Port %d can not be set into started\n", pi);
1786 rte_eth_macaddr_get(pi, &mac_addr);
1787 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1788 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1789 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1790 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1792 /* at least one port started, need checking link status */
1793 need_check_link_status = 1;
/* Register the generic ethdev event callback for every event type on
 * all ports (registration happens once per start_port() call). */
1796 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1797 event_type < RTE_ETH_EVENT_MAX;
1799 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1804 printf("Failed to setup even callback for event %d\n",
1810 if (need_check_link_status == 1 && !no_link_check)
1811 check_all_ports_link_status(RTE_PORT_ALL);
1812 else if (need_check_link_status == 0)
1813 printf("Please stop the ports first\n");
/*
 * Stop one port, or all ports when pid == RTE_PORT_ALL. Ports that are
 * part of the forwarding configuration while forwarding is active, and
 * bonding slave ports, are refused. Each port transitions
 * STARTED -> HANDLING -> STOPPED via atomic compare-and-set; link
 * status is re-checked afterwards unless disabled.
 */
1820 stop_port(portid_t pid)
1823 struct rte_port *port;
1824 int need_check_link_status = 0;
1831 if (port_id_is_invalid(pid, ENABLED_WARN))
1834 printf("Stopping ports...\n");
1836 RTE_ETH_FOREACH_DEV(pi) {
1837 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1840 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1841 printf("Please remove port %d from forwarding configuration.\n", pi);
1845 if (port_is_bonding_slave(pi)) {
1846 printf("Please remove port %d from bonded device.\n", pi);
1851 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1852 RTE_PORT_HANDLING) == 0)
1855 rte_eth_dev_stop(pi);
1857 if (rte_atomic16_cmpset(&(port->port_status),
1858 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1859 printf("Port %d can not be set into stopped\n", pi);
1860 need_check_link_status = 1;
1862 if (need_check_link_status && !no_link_check)
1863 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Close one port, or all ports when pid == RTE_PORT_ALL. Refuses ports
 * still in the active forwarding configuration and bonding slaves;
 * skips already-closed ports and ports that are not stopped. Flushes
 * the port's flow rules before rte_eth_dev_close(), then transitions
 * the state STOPPED -> HANDLING -> CLOSED atomically.
 */
1869 close_port(portid_t pid)
1872 struct rte_port *port;
1874 if (port_id_is_invalid(pid, ENABLED_WARN))
1877 printf("Closing ports...\n");
1879 RTE_ETH_FOREACH_DEV(pi) {
1880 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1883 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1884 printf("Please remove port %d from forwarding configuration.\n", pi);
1888 if (port_is_bonding_slave(pi)) {
1889 printf("Please remove port %d from bonded device.\n", pi);
/* cmpset(CLOSED, CLOSED) is used purely as an atomic read to detect an
 * already-closed port. */
1894 if (rte_atomic16_cmpset(&(port->port_status),
1895 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1896 printf("Port %d is already closed\n", pi);
1900 if (rte_atomic16_cmpset(&(port->port_status),
1901 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1902 printf("Port %d is now not stopped\n", pi);
1906 if (port->flow_list)
1907 port_flow_flush(pi);
1908 rte_eth_dev_close(pi);
1910 if (rte_atomic16_cmpset(&(port->port_status),
1911 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1912 printf("Port %d cannot be set to closed\n", pi);
/*
 * Reset one port, or all ports when pid == RTE_PORT_ALL, via
 * rte_eth_dev_reset(). Refuses ports in the active forwarding
 * configuration and bonding slaves. On success the port is flagged for
 * full reconfiguration (device and queues) at the next start_port().
 */
1919 reset_port(portid_t pid)
1923 struct rte_port *port;
1925 if (port_id_is_invalid(pid, ENABLED_WARN))
1928 printf("Resetting ports...\n");
1930 RTE_ETH_FOREACH_DEV(pi) {
1931 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1934 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1935 printf("Please remove port %d from forwarding "
1936 "configuration.\n", pi);
1940 if (port_is_bonding_slave(pi)) {
1941 printf("Please remove port %d from bonded device.\n",
1946 diag = rte_eth_dev_reset(pi);
1949 port->need_reconfig = 1;
1950 port->need_reconfig_queues = 1;
1952 printf("Failed to reset port %d. diag=%d\n", pi, diag);
/*
 * Register eth_dev_event_callback() for hotplug device events on all
 * devices (NULL device name == match any). Returns an error indication
 * on failure (exact value on an elided line; printf reports it).
 */
1960 eth_dev_event_callback_register(void)
1964 /* register the device event callback */
1965 ret = rte_dev_event_callback_register(NULL,
1966 eth_dev_event_callback, NULL);
1968 printf("Failed to register device event callback\n");
/*
 * Unregister the hotplug device event callback registered by
 * eth_dev_event_callback_register() (NULL device name == match any).
 */
1977 eth_dev_event_callback_unregister(void)
1981 /* unregister the device event callback */
1982 ret = rte_dev_event_callback_unregister(NULL,
1983 eth_dev_event_callback, NULL);
1985 printf("Failed to unregister device event callback\n");
/*
 * Hot-plug attach a new port described by 'identifier' (PCI address or
 * virtual device string). On success: determine its NUMA socket
 * (falling back to 0 when invalid), build its configuration via
 * reconfig(), enable promiscuous mode, refresh the global port count,
 * mark it stopped, and append it to the forwarding port list.
 */
1993 attach_port(char *identifier)
1996 unsigned int socket_id;
1998 printf("Attaching a new port...\n");
2000 if (identifier == NULL) {
2001 printf("Invalid parameters are specified\n");
2005 if (rte_eth_dev_attach(identifier, &pi))
2008 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2009 /* if socket_id is invalid, set to 0 */
2010 if (check_socket_id(socket_id) < 0)
2012 reconfig(pi, socket_id);
2013 rte_eth_promiscuous_enable(pi);
2015 nb_ports = rte_eth_dev_count_avail();
2017 ports[pi].port_status = RTE_PORT_STOPPED;
2019 update_fwd_ports(pi);
2021 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Hot-plug detach port_id. The port must already be closed; its flow
 * rules are flushed first. On success the global port count and the
 * forwarding port list are refreshed (RTE_MAX_ETHPORTS == append no
 * new port).
 */
2026 detach_port(portid_t port_id)
2028 char name[RTE_ETH_NAME_MAX_LEN];
2030 printf("Detaching a port...\n");
2032 if (!port_is_closed(port_id)) {
2033 printf("Please close port first\n");
2037 if (ports[port_id].flow_list)
2038 port_flow_flush(port_id);
2040 if (rte_eth_dev_detach(port_id, name)) {
2041 TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2045 nb_ports = rte_eth_dev_count_avail();
2047 update_fwd_ports(RTE_MAX_ETHPORTS);
2049 printf("Port %u is detached. Now total ports is %d\n",
/*
 * NOTE(review): function header is elided in this view — presumably
 * pmd_test_exit(), testpmd's shutdown path: stop forwarding, shut down
 * every port, stop the hotplug event monitor and unregister callbacks.
 */
2058 struct rte_device *device;
2063 stop_packet_forwarding();
2065 if (ports != NULL) {
2067 RTE_ETH_FOREACH_DEV(pt_id) {
2068 printf("\nShutting down port %d...\n", pt_id);
2074 * This is a workaround to fix a virtio-user issue that
2075 * requires to call clean-up routine to remove existing
2077 * This workaround valid only for testpmd, needs a fix
2078 * valid for all applications.
2079 * TODO: Implement proper resource cleanup
2081 device = rte_eth_devices[pt_id].device;
2082 if (device && !strcmp(device->driver->name, "net_virtio_user"))
/* Tear down hotplug monitoring in reverse order of setup in main(). */
2088 ret = rte_dev_event_monitor_stop();
2091 "fail to stop device event monitor.");
2093 ret = eth_dev_event_callback_unregister();
2096 "fail to unregister all event callbacks.");
2099 printf("\nBye...\n");
/* Entry of the non-interactive test command menu: a command name mapped
 * to its handler function. PMD_TEST_CMD_NB counts the entries of the
 * pmd_test_menu[] table (defined elsewhere in this file). */
2102 typedef void (*cmd_func_t)(void);
2103 struct pmd_test_command {
2104 const char *cmd_name;
2105 cmd_func_t cmd_func;
2108 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2110 /* Check the link status of all ports in up to 9s, and print them finally */
2112 check_all_ports_link_status(uint32_t port_mask)
2114 #define CHECK_INTERVAL 100 /* 100ms */
2115 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2117 uint8_t count, all_ports_up, print_flag = 0;
2118 struct rte_eth_link link;
2120 printf("Checking link statuses...\n");
2122 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2124 RTE_ETH_FOREACH_DEV(portid) {
2125 if ((port_mask & (1 << portid)) == 0)
2127 memset(&link, 0, sizeof(link));
2128 rte_eth_link_get_nowait(portid, &link);
2129 /* print link status if flag set */
2130 if (print_flag == 1) {
2131 if (link.link_status)
2133 "Port%d Link Up. speed %u Mbps- %s\n",
2134 portid, link.link_speed,
2135 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2136 ("full-duplex") : ("half-duplex\n"));
2138 printf("Port %d Link Down\n", portid);
2141 /* clear all_ports_up flag if any link down */
2142 if (link.link_status == ETH_LINK_DOWN) {
2147 /* after finally printing all link status, get out */
2148 if (print_flag == 1)
2151 if (all_ports_up == 0) {
2153 rte_delay_ms(CHECK_INTERVAL);
2156 /* set the print_flag if all ports up or timeout */
2157 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred handler (scheduled via rte_eal_alarm_set from
 * eth_event_callback) for a device-removal interrupt: if the removed
 * port was forwarding, stop forwarding first; close and detach the
 * port with link checking temporarily disabled; then resume forwarding
 * if it had been running.
 */
2167 rmv_event_callback(void *arg)
2169 int need_to_start = 0;
2170 int org_no_link_check = no_link_check;
/* The port id is smuggled through the void* alarm argument. */
2171 portid_t port_id = (intptr_t)arg;
2173 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2175 if (!test_done && port_is_forwarding(port_id)) {
2177 stop_packet_forwarding();
/* Restore the caller-visible link-check setting before closing. */
2181 no_link_check = org_no_link_check;
2182 close_port(port_id);
2183 detach_port(port_id);
2185 start_packet_forwarding(0);
2188 /* This function is used by the interrupt thread */
/*
 * Generic ethdev event callback registered for all event types in
 * start_port(): optionally logs the event (per event_print_mask) and,
 * for device-removal events, schedules rmv_event_callback() as a
 * deferred alarm (it must not detach the port from interrupt context).
 */
2190 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2193 static const char * const event_desc[] = {
2194 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2195 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2196 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2197 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2198 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2199 [RTE_ETH_EVENT_IPSEC] = "IPsec",
2200 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2201 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2202 [RTE_ETH_EVENT_NEW] = "device probed",
2203 [RTE_ETH_EVENT_DESTROY] = "device released",
2204 [RTE_ETH_EVENT_MAX] = NULL,
2207 RTE_SET_USED(param);
2208 RTE_SET_USED(ret_param);
2210 if (type >= RTE_ETH_EVENT_MAX) {
/* NOTE(review): PRIu8 with a portid_t argument — portid_t is wider than
 * 8 bits in this DPDK era; verify the format specifier in full source. */
2211 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
2212 port_id, __func__, type);
2214 } else if (event_print_mask & (UINT32_C(1) << type)) {
2215 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2220 if (port_id_is_invalid(port_id, DISABLED_WARN))
2224 case RTE_ETH_EVENT_INTR_RMV:
/* Defer removal handling by 100ms; detaching from within the
 * interrupt thread's callback is not safe. */
2225 if (rte_eal_alarm_set(100000,
2226 rmv_event_callback, (void *)(intptr_t)port_id))
2227 fprintf(stderr, "Could not set up deferred device removal\n");
2235 /* This function is used by the interrupt thread */
/*
 * Hotplug (bus-level) device event callback: currently only logs
 * add/remove events for device_name; actual failure handling and
 * attach-on-add are unimplemented (see TODOs below).
 */
2237 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2238 __rte_unused void *arg)
2240 if (type >= RTE_DEV_EVENT_MAX) {
2241 fprintf(stderr, "%s called upon invalid event %d\n",
2247 case RTE_DEV_EVENT_REMOVE:
2248 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2250 /* TODO: After finish failure handle, begin to stop
2251 * packet forward, stop port, close port, detach port.
2254 case RTE_DEV_EVENT_ADD:
2255 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2257 /* TODO: After finish kernel driver binding,
2258 * begin to attach port.
/*
 * Program the NIC's TX queue -> stats-counter mapping registers for
 * port_id from the user-supplied tx_queue_stats_mappings[] table
 * (entries for other ports or out-of-range queues are skipped). Marks
 * the port's tx mapping as enabled when at least one entry applied.
 */
2267 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2271 uint8_t mapping_found = 0;
2273 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2274 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2275 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2276 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2277 tx_queue_stats_mappings[i].queue_id,
2278 tx_queue_stats_mappings[i].stats_counter_id);
2285 port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX counterpart of set_tx_queue_stats_mapping_registers(): program the
 * NIC's RX queue -> stats-counter mapping registers for port_id from
 * rx_queue_stats_mappings[], marking the port's rx mapping as enabled
 * when at least one entry applied.
 */
2290 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2294 uint8_t mapping_found = 0;
2296 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2297 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2298 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2299 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2300 rx_queue_stats_mappings[i].queue_id,
2301 rx_queue_stats_mappings[i].stats_counter_id);
2308 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue-stats mappings to port pi. -ENOTSUP from
 * the PMD is tolerated (the feature is simply disabled for the port);
 * any other error is fatal and aborts testpmd.
 */
2313 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2317 diag = set_tx_queue_stats_mapping_registers(pi, port);
2319 if (diag == -ENOTSUP) {
2320 port->tx_queue_stats_mapping_enabled = 0;
2321 printf("TX queue stats mapping not supported port id=%d\n", pi);
2324 rte_exit(EXIT_FAILURE,
2325 "set_tx_queue_stats_mapping_registers "
2326 "failed for port id=%d diag=%d\n",
2330 diag = set_rx_queue_stats_mapping_registers(pi, port);
2332 if (diag == -ENOTSUP) {
2333 port->rx_queue_stats_mapping_enabled = 0;
2334 printf("RX queue stats mapping not supported port id=%d\n", pi);
2337 rte_exit(EXIT_FAILURE,
2338 "set_rx_queue_stats_mapping_registers "
2339 "failed for port id=%d diag=%d\n",
/*
 * Build the per-queue RX and TX configuration of a port: start from the
 * PMD's defaults (dev_info.default_rxconf/default_txconf) and override
 * each threshold only when the corresponding command-line parameter was
 * set (RTE_PMD_PARAM_UNSET means "keep the PMD default"). Also records
 * the global descriptor counts (nb_rxd/nb_txd) per queue.
 */
2345 rxtx_port_config(struct rte_port *port)
2349 for (qid = 0; qid < nb_rxq; qid++) {
2350 port->rx_conf[qid] = port->dev_info.default_rxconf;
2352 /* Check if any Rx parameters have been passed */
2353 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2354 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2356 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2357 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2359 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2360 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2362 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2363 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2365 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2366 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2368 port->nb_rx_desc[qid] = nb_rxd;
2371 for (qid = 0; qid < nb_txq; qid++) {
2372 port->tx_conf[qid] = port->dev_info.default_txconf;
2374 /* Check if any Tx parameters have been passed */
2375 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2376 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2378 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2379 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2381 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2382 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2384 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2385 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2387 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2388 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2390 port->nb_tx_desc[qid] = nb_txd;
/*
 * Build the initial device configuration of every probed port: fdir
 * settings, RSS hash functions (masked by what the device supports),
 * multi-queue RX mode (RSS vs none, when not in DCB mode), per-queue
 * RX/TX config, MAC address, queue-stats mappings, optional ixgbe
 * bypass init, and LSC/RMV interrupt enabling where supported.
 */
2395 init_port_config(void)
2398 struct rte_port *port;
2400 RTE_ETH_FOREACH_DEV(pid) {
2402 port->dev_conf.fdir_conf = fdir_conf;
2403 rte_eth_dev_info_get(pid, &port->dev_info);
/* With RX queues: enable RSS restricted to the offloads the device
 * actually supports; without RX queues: disable RSS entirely (the
 * branch condition is on an elided line). */
2405 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2406 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2407 rss_hf & port->dev_info.flow_type_rss_offloads;
2409 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2410 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2413 if (port->dcb_flag == 0) {
2414 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2415 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2417 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2420 rxtx_port_config(port);
2422 rte_eth_macaddr_get(pid, &port->eth_addr);
2424 map_port_queue_stats_mapping_registers(pid, port);
2425 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2426 rte_pmd_ixgbe_bypass_init(pid);
/* Enable link-state-change / removal interrupts only when both the
 * user asked for them and the device advertises the capability. */
2429 if (lsc_interrupt &&
2430 (rte_eth_devices[pid].data->dev_flags &
2431 RTE_ETH_DEV_INTR_LSC))
2432 port->dev_conf.intr_conf.lsc = 1;
2433 if (rmv_interrupt &&
2434 (rte_eth_devices[pid].data->dev_flags &
2435 RTE_ETH_DEV_INTR_RMV))
2436 port->dev_conf.intr_conf.rmv = 1;
2440 void set_port_slave_flag(portid_t slave_pid)
2442 struct rte_port *port;
2444 port = &ports[slave_pid];
2445 port->slave_flag = 1;
2448 void clear_port_slave_flag(portid_t slave_pid)
2450 struct rte_port *port;
2452 port = &ports[slave_pid];
2453 port->slave_flag = 0;
/*
 * Return non-zero when slave_pid is a bonding slave, detected either
 * from the ethdev's RTE_ETH_DEV_BONDED_SLAVE device flag or from
 * testpmd's own slave_flag bookkeeping.
 */
2456 uint8_t port_is_bonding_slave(portid_t slave_pid)
2458 struct rte_port *port;
2460 port = &ports[slave_pid];
2461 if ((rte_eth_devices[slave_pid].data->dev_flags &
2462 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
/* VLAN IDs 0..31 used by get_eth_dcb_conf() to populate the VMDQ+DCB
 * pool maps and by init_port_dcb_config() to program the RX VLAN
 * filter table. */
2467 const uint16_t vlan_tags[] = {
2468 0, 1, 2, 3, 4, 5, 6, 7,
2469 8, 9, 10, 11, 12, 13, 14, 15,
2470 16, 17, 18, 19, 20, 21, 22, 23,
2471 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill eth_conf for DCB operation. Two variants:
 * - DCB_VT_ENABLED: VMDQ+DCB — pool count derived from the number of
 *   TCs (4 TCs -> 32 pools, otherwise 16), VLAN-tag-to-pool maps built
 *   from vlan_tags[], priorities mapped round-robin onto TCs.
 * - otherwise: plain DCB with RSS on RX.
 * pfc_en selects whether priority flow control capability is advertised
 * in addition to priority groups.
 */
2475 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2476 enum dcb_mode_enable dcb_mode,
2477 enum rte_eth_nb_tcs num_tcs,
2483 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2484 * given above, and the number of traffic classes available for use.
2486 if (dcb_mode == DCB_VT_ENABLED) {
2487 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2488 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2489 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2490 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2492 /* VMDQ+DCB RX and TX configurations */
2493 vmdq_rx_conf->enable_default_pool = 0;
2494 vmdq_rx_conf->default_pool = 0;
2495 vmdq_rx_conf->nb_queue_pools =
2496 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2497 vmdq_tx_conf->nb_queue_pools =
2498 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* Map each VLAN tag to one pool (round-robin over the pools). */
2500 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2501 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2502 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2503 vmdq_rx_conf->pool_map[i].pools =
2504 1 << (i % vmdq_rx_conf->nb_queue_pools);
/* Spread the 8 user priorities round-robin over the TCs. */
2506 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2507 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2508 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2511 /* set DCB mode of RX and TX of multiple queues */
2512 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2513 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2515 struct rte_eth_dcb_rx_conf *rx_conf =
2516 &eth_conf->rx_adv_conf.dcb_rx_conf;
2517 struct rte_eth_dcb_tx_conf *tx_conf =
2518 &eth_conf->tx_adv_conf.dcb_tx_conf;
2520 rx_conf->nb_tcs = num_tcs;
2521 tx_conf->nb_tcs = num_tcs;
2523 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2524 rx_conf->dcb_tc[i] = i % num_tcs;
2525 tx_conf->dcb_tc[i] = i % num_tcs;
2527 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2528 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2529 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
/* With pfc_en advertise PFC on top of priority-group support. */
2533 eth_conf->dcb_capability_en =
2534 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2536 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * Reconfigure port pid for DCB: build a DCB rte_eth_conf via
 * get_eth_dcb_conf(), reconfigure the device, derive the global
 * nb_rxq/nb_txq from the device's DCB capabilities (testpmd assumes
 * all ports share the same DCB capability), enable VLAN filtering,
 * program the VLAN filter table, and flag the port as DCB-configured.
 */
2542 init_port_dcb_config(portid_t pid,
2543 enum dcb_mode_enable dcb_mode,
2544 enum rte_eth_nb_tcs num_tcs,
2547 struct rte_eth_conf port_conf;
2548 struct rte_port *rte_port;
2552 rte_port = &ports[pid];
2554 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2555 /* Enter DCB configuration status */
/* Preserve the port's current rx/tx mode settings as the base. */
2558 port_conf.rxmode = rte_port->dev_conf.rxmode;
2559 port_conf.txmode = rte_port->dev_conf.txmode;
2561 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2562 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2565 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
/* NOTE(review): nb_rxq is passed for both RX and TX queue counts here
 * — looks intentional (symmetric queues in DCB mode) but verify. */
2567 /* re-configure the device . */
2568 rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
2570 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2572 /* If dev_info.vmdq_pool_base is greater than 0,
2573 * the queue id of vmdq pools is started after pf queues.
2575 if (dcb_mode == DCB_VT_ENABLED &&
2576 rte_port->dev_info.vmdq_pool_base > 0) {
2577 printf("VMDQ_DCB multi-queue mode is nonsensical"
2578 " for port %d.", pid);
2582 /* Assume the ports in testpmd have the same dcb capability
2583 * and has the same number of rxq and txq in dcb mode
2585 if (dcb_mode == DCB_VT_ENABLED) {
2586 if (rte_port->dev_info.max_vfs > 0) {
2587 nb_rxq = rte_port->dev_info.nb_rx_queues;
2588 nb_txq = rte_port->dev_info.nb_tx_queues;
2590 nb_rxq = rte_port->dev_info.max_rx_queues;
2591 nb_txq = rte_port->dev_info.max_tx_queues;
2594 /*if vt is disabled, use all pf queues */
2595 if (rte_port->dev_info.vmdq_pool_base == 0) {
2596 nb_rxq = rte_port->dev_info.max_rx_queues;
2597 nb_txq = rte_port->dev_info.max_tx_queues;
2599 nb_rxq = (queueid_t)num_tcs;
2600 nb_txq = (queueid_t)num_tcs;
2604 rx_free_thresh = 64;
2606 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2608 rxtx_port_config(rte_port);
2610 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
/* Accept every VLAN id used in the DCB pool mapping. */
2611 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2612 rx_vft_set(pid, vlan_tags[i], 1);
2614 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2615 map_port_queue_stats_mapping_registers(pid, rte_port);
2617 rte_port->dcb_flag = 1;
/*
 * NOTE(review): function header elided — presumably init_port():
 * allocate the zero-initialized global ports[] array (one rte_port per
 * possible ethdev, cache-line aligned); abort testpmd on allocation
 * failure.
 */
2625 /* Configuration of Ethernet ports. */
2626 ports = rte_zmalloc("testpmd: ports",
2627 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2628 RTE_CACHE_LINE_SIZE);
2629 if (ports == NULL) {
2630 rte_exit(EXIT_FAILURE,
2631 "rte_zmalloc(%d struct rte_port) failed\n",
/*
 * NOTE(review): function header elided — presumably print_stats():
 * clear the terminal with ANSI escape sequences (ESC[2J = clear screen,
 * ESC[1;1H = cursor home) and print NIC statistics for every
 * forwarding port.
 */
2647 const char clr[] = { 27, '[', '2', 'J', '\0' };
2648 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2650 /* Clear screen and move to top left */
2651 printf("%s%s", clr, top_left);
2653 printf("\nPort statistics ====================================");
2654 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2655 nic_stats_display(fwd_ports_ids[i]);
/*
 * SIGINT/SIGTERM handler: uninitialize the pdump and latency-stats
 * frameworks (when compiled in), set the force-quit flag, then restore
 * the default handler and re-raise the signal so the process exits
 * with the conventional signal status.
 */
2659 signal_handler(int signum)
2661 if (signum == SIGINT || signum == SIGTERM) {
2662 printf("\nSignal %d received, preparing to exit...\n",
2664 #ifdef RTE_LIBRTE_PDUMP
2665 /* uninitialize packet capture framework */
2668 #ifdef RTE_LIBRTE_LATENCY_STATS
2669 rte_latencystats_uninit();
2672 /* Set flag to indicate the force termination. */
2674 /* exit with the expected status */
2675 signal(signum, SIG_DFL);
2676 kill(getpid(), signum);
2681 main(int argc, char** argv)
2687 signal(SIGINT, signal_handler);
2688 signal(SIGTERM, signal_handler);
2690 diag = rte_eal_init(argc, argv);
2692 rte_panic("Cannot init EAL\n");
2694 testpmd_logtype = rte_log_register("testpmd");
2695 if (testpmd_logtype < 0)
2696 rte_panic("Cannot register log type");
2697 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2699 #ifdef RTE_LIBRTE_PDUMP
2700 /* initialize packet capture framework */
2701 rte_pdump_init(NULL);
2704 nb_ports = (portid_t) rte_eth_dev_count_avail();
2706 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2708 /* allocate port structures, and init them */
2711 set_def_fwd_config();
2713 rte_panic("Empty set of forwarding logical cores - check the "
2714 "core mask supplied in the command parameters\n");
2716 /* Bitrate/latency stats disabled by default */
2717 #ifdef RTE_LIBRTE_BITRATE
2718 bitrate_enabled = 0;
2720 #ifdef RTE_LIBRTE_LATENCY_STATS
2721 latencystats_enabled = 0;
2724 /* on FreeBSD, mlockall() is disabled by default */
2725 #ifdef RTE_EXEC_ENV_BSDAPP
2734 launch_args_parse(argc, argv);
2736 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2737 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2741 if (tx_first && interactive)
2742 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2743 "interactive mode.\n");
2745 if (tx_first && lsc_interrupt) {
2746 printf("Warning: lsc_interrupt needs to be off when "
2747 " using tx_first. Disabling.\n");
2751 if (!nb_rxq && !nb_txq)
2752 printf("Warning: Either rx or tx queues should be non-zero\n");
2754 if (nb_rxq > 1 && nb_rxq > nb_txq)
2755 printf("Warning: nb_rxq=%d enables RSS configuration, "
2756 "but nb_txq=%d will prevent to fully test it.\n",
2762 /* enable hot plug monitoring */
2763 ret = rte_dev_event_monitor_start();
2768 eth_dev_event_callback_register();
2772 if (start_port(RTE_PORT_ALL) != 0)
2773 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2775 /* set all ports to promiscuous mode by default */
2776 RTE_ETH_FOREACH_DEV(port_id)
2777 rte_eth_promiscuous_enable(port_id);
2779 /* Init metrics library */
2780 rte_metrics_init(rte_socket_id());
2782 #ifdef RTE_LIBRTE_LATENCY_STATS
2783 if (latencystats_enabled != 0) {
2784 int ret = rte_latencystats_init(1, NULL);
2786 printf("Warning: latencystats init()"
2787 " returned error %d\n", ret);
2788 printf("Latencystats running on lcore %d\n",
2789 latencystats_lcore_id);
2793 /* Setup bitrate stats */
2794 #ifdef RTE_LIBRTE_BITRATE
2795 if (bitrate_enabled != 0) {
2796 bitrate_data = rte_stats_bitrate_create();
2797 if (bitrate_data == NULL)
2798 rte_exit(EXIT_FAILURE,
2799 "Could not allocate bitrate data.\n");
2800 rte_stats_bitrate_reg(bitrate_data);
2804 #ifdef RTE_LIBRTE_CMDLINE
2805 if (strlen(cmdline_filename) != 0)
2806 cmdline_read_from_file(cmdline_filename);
2808 if (interactive == 1) {
2810 printf("Start automatic packet forwarding\n");
2811 start_packet_forwarding(0);
2823 printf("No commandline core given, start packet forwarding\n");
2824 start_packet_forwarding(tx_first);
2825 if (stats_period != 0) {
2826 uint64_t prev_time = 0, cur_time, diff_time = 0;
2827 uint64_t timer_period;
2829 /* Convert to number of cycles */
2830 timer_period = stats_period * rte_get_timer_hz();
2832 while (f_quit == 0) {
2833 cur_time = rte_get_timer_cycles();
2834 diff_time += cur_time - prev_time;
2836 if (diff_time >= timer_period) {
2838 /* Reset the timer */
2841 /* Sleep to avoid unnecessary checks */
2842 prev_time = cur_time;
2847 printf("Press enter to exit\n");
2848 rc = read(0, &c, 1);