1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
66 uint16_t verbose_level = 0; /**< Silent by default. */
67 int testpmd_logtype; /**< Log type for testpmd logs */
69 /* use master core for command line ? */
70 uint8_t interactive = 0;
71 uint8_t auto_start = 0;
73 char cmdline_filename[PATH_MAX] = {0};
76 * NUMA support configuration.
77 * When set, the NUMA support attempts to dispatch the allocation of the
78 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
79 * probed ports among the CPU sockets 0 and 1.
80 * Otherwise, all memory is allocated from CPU socket 0.
82 uint8_t numa_support = 1; /**< numa enabled by default */
85 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
88 uint8_t socket_num = UMA_NO_CONFIG;
91 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
96 * Store specified sockets on which memory pool to be used by ports
99 uint8_t port_numa[RTE_MAX_ETHPORTS];
102 * Store specified sockets on which RX ring to be used by ports
105 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
108 * Store specified sockets on which TX ring to be used by ports
111 uint8_t txring_numa[RTE_MAX_ETHPORTS];
114 * Record the Ethernet address of peer target ports to which packets are
116 * Must be instantiated with the ethernet addresses of peer traffic generator
119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120 portid_t nb_peer_eth_addrs = 0;
123 * Probed Target Environment.
125 struct rte_port *ports; /**< For all probed ethernet ports. */
126 portid_t nb_ports; /**< Number of probed ethernet ports. */
127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
131 * Test Forwarding Configuration.
132 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
133 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
137 portid_t nb_cfg_ports; /**< Number of configured ports. */
138 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
143 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
144 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
147 * Forwarding engines.
149 struct fwd_engine * fwd_engines[] = {
158 #if defined RTE_LIBRTE_PMD_SOFTNIC
161 #ifdef RTE_LIBRTE_IEEE1588
162 &ieee1588_fwd_engine,
167 struct fwd_config cur_fwd_config;
168 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
169 uint32_t retry_enabled;
170 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
171 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
173 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
174 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
175 * specified on command-line. */
176 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
179 * In a container, the process running with the 'stats-period' option
180 * cannot be terminated. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
185 * Configuration of packet segments used by the "txonly" processing engine.
187 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
188 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
189 TXONLY_DEF_PACKET_LEN,
191 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
193 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
194 /**< Split policy for packets to TX. */
196 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
197 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
199 /* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
200 uint8_t dcb_config = 0;
202 /* Whether the dcb is in testing status */
203 uint8_t dcb_test = 0;
206 * Configurable number of RX/TX queues.
208 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
209 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
212 * Configurable number of RX/TX ring descriptors.
213 * Defaults are supplied by drivers via ethdev.
215 #define RTE_TEST_RX_DESC_DEFAULT 0
216 #define RTE_TEST_TX_DESC_DEFAULT 0
217 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
218 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
220 #define RTE_PMD_PARAM_UNSET -1
222 * Configurable values of RX and TX ring threshold registers.
225 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
234 * Configurable value of RX free threshold.
236 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
239 * Configurable value of RX drop enable.
241 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
244 * Configurable value of TX free threshold.
246 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
249 * Configurable value of TX RS bit threshold.
251 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
254 * Receive Side Scaling (RSS) configuration.
256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
259 * Port topology configuration
261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
264 * Avoid flushing all the RX streams before starting forwarding.
266 uint8_t no_flush_rx = 0; /* flush by default */
269 * Flow API isolated mode.
271 uint8_t flow_isolate_all;
274 * Avoid checking link status when starting/stopping a port.
276 uint8_t no_link_check = 0; /* check by default */
279 * Enable link status change notification
281 uint8_t lsc_interrupt = 1; /* enabled by default */
284 * Enable device removal notification.
286 uint8_t rmv_interrupt = 1; /* enabled by default */
288 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
291 * Display or mask ether events
292 * Default to all events except VF_MBOX
294 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
295 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
296 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
297 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
298 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
299 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
300 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
302 * Decide if all memory are locked for performance.
307 * NIC bypass mode configuration options.
310 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
311 /* The NIC bypass watchdog timeout. */
312 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
316 #ifdef RTE_LIBRTE_LATENCY_STATS
319 * Set when latency stats is enabled in the commandline
321 uint8_t latencystats_enabled;
324 * Lcore ID to serve latency statistics.
326 lcoreid_t latencystats_lcore_id = -1;
331 * Ethernet device configuration.
333 struct rte_eth_rxmode rx_mode = {
334 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
335 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
338 struct rte_eth_txmode tx_mode = {
339 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
342 struct rte_fdir_conf fdir_conf = {
343 .mode = RTE_FDIR_MODE_NONE,
344 .pballoc = RTE_FDIR_PBALLOC_64K,
345 .status = RTE_FDIR_REPORT_STATUS,
347 .vlan_tci_mask = 0xFFEF,
349 .src_ip = 0xFFFFFFFF,
350 .dst_ip = 0xFFFFFFFF,
353 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
354 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
356 .src_port_mask = 0xFFFF,
357 .dst_port_mask = 0xFFFF,
358 .mac_addr_byte_mask = 0xFF,
359 .tunnel_type_mask = 1,
360 .tunnel_id_mask = 0xFFFFFFFF,
365 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
367 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
368 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
370 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
371 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
373 uint16_t nb_tx_queue_stats_mappings = 0;
374 uint16_t nb_rx_queue_stats_mappings = 0;
377 * Display zero values by default for xstats
379 uint8_t xstats_hide_zero;
381 unsigned int num_sockets = 0;
382 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
384 #ifdef RTE_LIBRTE_BITRATE
385 /* Bitrate statistics */
386 struct rte_stats_bitrates *bitrate_data;
387 lcoreid_t bitrate_lcore_id;
388 uint8_t bitrate_enabled;
391 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
392 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
394 struct vxlan_encap_conf vxlan_encap_conf = {
397 .vni = "\x00\x00\x00",
399 .udp_dst = RTE_BE16(4789),
400 .ipv4_src = IPv4(127, 0, 0, 1),
401 .ipv4_dst = IPv4(255, 255, 255, 255),
402 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
403 "\x00\x00\x00\x00\x00\x00\x00\x01",
404 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
405 "\x00\x00\x00\x00\x00\x00\x11\x11",
407 .eth_src = "\x00\x00\x00\x00\x00\x00",
408 .eth_dst = "\xff\xff\xff\xff\xff\xff",
411 /* Forward function declarations */
412 static void map_port_queue_stats_mapping_registers(portid_t pi,
413 struct rte_port *port);
414 static void check_all_ports_link_status(uint32_t port_mask);
415 static int eth_event_callback(portid_t port_id,
416 enum rte_eth_event_type type,
417 void *param, void *ret_param);
418 static void eth_dev_event_callback(char *device_name,
419 enum rte_dev_event_type type,
421 static int eth_dev_event_callback_register(void);
422 static int eth_dev_event_callback_unregister(void);
426 * Check if all the ports are started.
427 * If yes, return positive value. If not, return zero.
429 static int all_ports_started(void);
431 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
432 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
435 * Helper function to check if socket is already discovered.
436 * If yes, return positive value. If not, return zero.
/*
 * Return non-zero if socket_id is not yet present in socket_ids[],
 * zero if it was already discovered (see header comment above).
 * NOTE(review): excerpt — some original lines are elided between the
 * numbered lines below.
 */
439 new_socket_id(unsigned int socket_id)
/* Linear scan over the sockets discovered so far. */
443 for (i = 0; i < num_sockets; i++) {
444 if (socket_ids[i] == socket_id)
451 * Setup default configuration.
/*
 * Build the default forwarding-lcore configuration: record the NUMA
 * socket of every lcore, and collect every enabled non-master lcore
 * into fwd_lcores_cpuids[]. Sets nb_lcores and nb_cfg_lcores.
 * NOTE(review): excerpt — some original lines are elided.
 */
454 set_default_fwd_lcores_config(void)
458 unsigned int sock_num;
/* Walk all possible lcore ids, recording newly seen sockets. */
461 for (i = 0; i < RTE_MAX_LCORE; i++) {
462 sock_num = rte_lcore_to_socket_id(i);
463 if (new_socket_id(sock_num)) {
/* Refuse to exceed the socket_ids[] capacity. */
464 if (num_sockets >= RTE_MAX_NUMA_NODES) {
465 rte_exit(EXIT_FAILURE,
466 "Total sockets greater than %u\n",
469 socket_ids[num_sockets++] = sock_num;
/* Only enabled lcores, excluding the master, do forwarding. */
471 if (!rte_lcore_is_enabled(i))
473 if (i == rte_get_master_lcore())
475 fwd_lcores_cpuids[nb_lc++] = i;
477 nb_lcores = (lcoreid_t) nb_lc;
478 nb_cfg_lcores = nb_lcores;
/*
 * Fill peer_eth_addrs[] with default locally-administered MAC
 * addresses, using the port index as the last address byte.
 * NOTE(review): excerpt — some original lines are elided.
 */
483 set_def_peer_eth_addrs(void)
487 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
488 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
489 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default forwarding-port configuration: forward on every probed
 * Ethernet device, in enumeration order.
 * NOTE(review): excerpt — some original lines are elided.
 */
494 set_default_fwd_ports_config(void)
499 RTE_ETH_FOREACH_DEV(pt_id)
500 fwd_ports_ids[i++] = pt_id;
502 nb_cfg_ports = nb_ports;
503 nb_fwd_ports = nb_ports;
/* Apply all three default forwarding configurations (lcores, peer
 * MAC addresses, ports). */
507 set_def_fwd_config(void)
509 set_default_fwd_lcores_config();
510 set_def_peer_eth_addrs();
511 set_default_fwd_ports_config();
515 * Configuration initialisation done once at init time.
/*
 * Create the mbuf pool for one NUMA socket: nb_mbuf mbufs of
 * mbuf_seg_size data bytes each, named after the socket. Exits the
 * process on failure. NOTE(review): excerpt — some original lines
 * (including the anonymous-memory #ifdef branch selection) are elided.
 */
518 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
519 unsigned int socket_id)
521 char pool_name[RTE_MEMPOOL_NAMESIZE];
522 struct rte_mempool *rte_mp = NULL;
/* Total per-object size = mbuf header + data room. */
525 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
526 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
529 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
530 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
/* Path 1 (presumably mp_anon_enable — elided): empty pool populated
 * from anonymous mappings, then per-object pktmbuf init. */
533 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
534 mb_size, (unsigned) mb_mempool_cache,
535 sizeof(struct rte_pktmbuf_pool_private),
540 if (rte_mempool_populate_anon(rte_mp) == 0) {
541 rte_mempool_free(rte_mp);
545 rte_pktmbuf_pool_init(rte_mp, NULL);
546 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
/* Path 2: standard pktmbuf pool via the preferred mempool ops. */
548 /* wrapper to rte_mempool_create() */
549 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
550 rte_mbuf_best_mempool_ops());
551 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
552 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
/* A NULL pool at this point is fatal for testpmd. */
556 if (rte_mp == NULL) {
557 rte_exit(EXIT_FAILURE,
558 "Creation of mbuf pool for socket %u failed: %s\n",
559 socket_id, rte_strerror(rte_errno));
560 } else if (verbose_level > 0) {
561 rte_mempool_dump(stdout, rte_mp);
566 * Check given socket id is valid or not with NUMA mode,
567 * if valid, return 0, else return -1
/*
 * Validate a socket id under NUMA mode (see header comment above:
 * returns 0 if valid, -1 otherwise). Prints a one-time warning when an
 * undiscovered socket is seen while NUMA support is on.
 * NOTE(review): excerpt — some original lines are elided.
 */
570 check_socket_id(const unsigned int socket_id)
/* Warn only once across all calls. */
572 static int warning_once = 0;
574 if (new_socket_id(socket_id)) {
575 if (!warning_once && numa_support)
576 printf("Warning: NUMA should be configured manually by"
577 " using --port-numa-config and"
578 " --ring-numa-config parameters along with"
587 * Get the allowed maximum number of RX queues.
588 * *pid return the port id which has minimal value of
589 * max_rx_queues in all ports.
/*
 * Return the smallest max_rx_queues across all probed ports; *pid is
 * set (on an elided line) to the port holding that minimum.
 * NOTE(review): excerpt — some original lines are elided.
 */
592 get_allowed_max_nb_rxq(portid_t *pid)
594 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
596 struct rte_eth_dev_info dev_info;
598 RTE_ETH_FOREACH_DEV(pi) {
599 rte_eth_dev_info_get(pi, &dev_info);
/* Track the minimum over all ports. */
600 if (dev_info.max_rx_queues < allowed_max_rxq) {
601 allowed_max_rxq = dev_info.max_rx_queues;
605 return allowed_max_rxq;
609 * Check input rxq is valid or not.
610 * If input rxq is not greater than any of maximum number
611 * of RX queues of all ports, it is valid.
612 * if valid, return 0, else return -1
/*
 * Check that the requested per-port RX queue count does not exceed any
 * port's capability (returns 0 if valid, -1 otherwise, per the header
 * comment above). NOTE(review): excerpt — some lines are elided.
 */
615 check_nb_rxq(queueid_t rxq)
617 queueid_t allowed_max_rxq;
620 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
621 if (rxq > allowed_max_rxq) {
622 printf("Fail: input rxq (%u) can't be greater "
623 "than max_rx_queues (%u) of port %u\n",
633 * Get the allowed maximum number of TX queues.
634 * *pid return the port id which has minimal value of
635 * max_tx_queues in all ports.
/*
 * TX counterpart of get_allowed_max_nb_rxq(): smallest max_tx_queues
 * across all probed ports; *pid set (on an elided line) to that port.
 * NOTE(review): excerpt — some original lines are elided.
 */
638 get_allowed_max_nb_txq(portid_t *pid)
640 queueid_t allowed_max_txq = MAX_QUEUE_ID;
642 struct rte_eth_dev_info dev_info;
644 RTE_ETH_FOREACH_DEV(pi) {
645 rte_eth_dev_info_get(pi, &dev_info);
/* Track the minimum over all ports. */
646 if (dev_info.max_tx_queues < allowed_max_txq) {
647 allowed_max_txq = dev_info.max_tx_queues;
651 return allowed_max_txq;
655 * Check input txq is valid or not.
656 * If input txq is not greater than any of maximum number
657 * of TX queues of all ports, it is valid.
658 * if valid, return 0, else return -1
/*
 * Check that the requested per-port TX queue count does not exceed any
 * port's capability (returns 0 if valid, -1 otherwise, per the header
 * comment above). NOTE(review): excerpt — some lines are elided.
 */
661 check_nb_txq(queueid_t txq)
663 queueid_t allowed_max_txq;
666 allowed_max_txq = get_allowed_max_nb_txq(&pid);
667 if (txq > allowed_max_txq) {
668 printf("Fail: input txq (%u) can't be greater "
669 "than max_tx_queues (%u) of port %u\n",
682 struct rte_port *port;
683 struct rte_mempool *mbp;
684 unsigned int nb_mbuf_per_pool;
686 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
687 struct rte_gro_param gro_param;
691 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
694 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
695 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
696 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
699 /* Configuration of logical cores. */
700 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
701 sizeof(struct fwd_lcore *) * nb_lcores,
702 RTE_CACHE_LINE_SIZE);
703 if (fwd_lcores == NULL) {
704 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
705 "failed\n", nb_lcores);
707 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
708 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
709 sizeof(struct fwd_lcore),
710 RTE_CACHE_LINE_SIZE);
711 if (fwd_lcores[lc_id] == NULL) {
712 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
715 fwd_lcores[lc_id]->cpuid_idx = lc_id;
718 RTE_ETH_FOREACH_DEV(pid) {
720 /* Apply default TxRx configuration for all ports */
721 port->dev_conf.txmode = tx_mode;
722 port->dev_conf.rxmode = rx_mode;
723 rte_eth_dev_info_get(pid, &port->dev_info);
725 if (!(port->dev_info.rx_offload_capa &
726 DEV_RX_OFFLOAD_CRC_STRIP))
727 port->dev_conf.rxmode.offloads &=
728 ~DEV_RX_OFFLOAD_CRC_STRIP;
729 if (!(port->dev_info.tx_offload_capa &
730 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
731 port->dev_conf.txmode.offloads &=
732 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
734 if (port_numa[pid] != NUMA_NO_CONFIG)
735 port_per_socket[port_numa[pid]]++;
737 uint32_t socket_id = rte_eth_dev_socket_id(pid);
739 /* if socket_id is invalid, set to 0 */
740 if (check_socket_id(socket_id) < 0)
742 port_per_socket[socket_id]++;
746 /* Apply Rx offloads configuration */
747 for (k = 0; k < port->dev_info.max_rx_queues; k++)
748 port->rx_conf[k].offloads =
749 port->dev_conf.rxmode.offloads;
750 /* Apply Tx offloads configuration */
751 for (k = 0; k < port->dev_info.max_tx_queues; k++)
752 port->tx_conf[k].offloads =
753 port->dev_conf.txmode.offloads;
755 /* set flag to initialize port/queue */
756 port->need_reconfig = 1;
757 port->need_reconfig_queues = 1;
761 * Create pools of mbuf.
762 * If NUMA support is disabled, create a single pool of mbuf in
763 * socket 0 memory by default.
764 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
766 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
767 * nb_txd can be configured at run time.
769 if (param_total_num_mbufs)
770 nb_mbuf_per_pool = param_total_num_mbufs;
772 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
773 (nb_lcores * mb_mempool_cache) +
774 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
775 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
781 for (i = 0; i < num_sockets; i++)
782 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
785 if (socket_num == UMA_NO_CONFIG)
786 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
788 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
794 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
795 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
797 * Records which Mbuf pool to use by each logical core, if needed.
799 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
800 mbp = mbuf_pool_find(
801 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
804 mbp = mbuf_pool_find(0);
805 fwd_lcores[lc_id]->mbp = mbp;
806 /* initialize GSO context */
807 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
808 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
809 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
810 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
812 fwd_lcores[lc_id]->gso_ctx.flag = 0;
815 /* Configuration of packet forwarding streams. */
816 if (init_fwd_streams() < 0)
817 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
821 /* create a gro context for each lcore */
822 gro_param.gro_types = RTE_GRO_TCP_IPV4;
823 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
824 gro_param.max_item_per_flow = MAX_PKT_BURST;
825 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
826 gro_param.socket_id = rte_lcore_to_socket_id(
827 fwd_lcores_cpuids[lc_id]);
828 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
829 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
830 rte_exit(EXIT_FAILURE,
831 "rte_gro_ctx_create() failed\n");
835 #if defined RTE_LIBRTE_PMD_SOFTNIC
836 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
837 RTE_ETH_FOREACH_DEV(pid) {
839 const char *driver = port->dev_info.driver_name;
841 if (strcmp(driver, "net_softnic") == 0)
842 port->softport.fwd_lcore_arg = fwd_lcores;
/*
 * Re-initialize one port after a device event: refresh its dev_info,
 * flag it for full port/queue reconfiguration and pin its socket id.
 * NOTE(review): excerpt — some original lines are elided.
 */
851 reconfig(portid_t new_port_id, unsigned socket_id)
853 struct rte_port *port;
855 /* Reconfiguration of Ethernet ports. */
856 port = &ports[new_port_id];
857 rte_eth_dev_info_get(new_port_id, &port->dev_info);
859 /* set flag to initialize port/queue */
860 port->need_reconfig = 1;
861 port->need_reconfig_queues = 1;
862 port->socket_id = socket_id;
/*
 * (Re)allocate the forwarding-stream array sized to
 * nb_ports * max(nb_rxq, nb_txq). Validates queue counts against each
 * port's capabilities and assigns each port's socket id (NUMA config,
 * device socket, or --socket-num). Frees the old array first when the
 * size changed. NOTE(review): excerpt — some original lines (return
 * statements, continue paths) are elided.
 */
869 init_fwd_streams(void)
872 struct rte_port *port;
873 streamid_t sm_id, nb_fwd_streams_new;
876 /* set socket id according to numa or not */
877 RTE_ETH_FOREACH_DEV(pid) {
/* Reject configurations that exceed the device's queue limits. */
879 if (nb_rxq > port->dev_info.max_rx_queues) {
880 printf("Fail: nb_rxq(%d) is greater than "
881 "max_rx_queues(%d)\n", nb_rxq,
882 port->dev_info.max_rx_queues);
885 if (nb_txq > port->dev_info.max_tx_queues) {
886 printf("Fail: nb_txq(%d) is greater than "
887 "max_tx_queues(%d)\n", nb_txq,
888 port->dev_info.max_tx_queues);
/* Socket selection: explicit per-port NUMA config wins, else the
 * device's own socket (validated), else the global --socket-num. */
892 if (port_numa[pid] != NUMA_NO_CONFIG)
893 port->socket_id = port_numa[pid];
895 port->socket_id = rte_eth_dev_socket_id(pid);
897 /* if socket_id is invalid, set to 0 */
898 if (check_socket_id(port->socket_id) < 0)
903 if (socket_num == UMA_NO_CONFIG)
906 port->socket_id = socket_num;
/* One stream per queue: q = max(rx, tx) queues per port. */
910 q = RTE_MAX(nb_rxq, nb_txq);
912 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
915 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
/* Nothing to do if the stream count is unchanged. */
916 if (nb_fwd_streams_new == nb_fwd_streams)
/* Release the previous stream array before reallocating. */
919 if (fwd_streams != NULL) {
920 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
921 if (fwd_streams[sm_id] == NULL)
923 rte_free(fwd_streams[sm_id]);
924 fwd_streams[sm_id] = NULL;
926 rte_free(fwd_streams);
931 nb_fwd_streams = nb_fwd_streams_new;
932 if (nb_fwd_streams) {
933 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
934 sizeof(struct fwd_stream *) * nb_fwd_streams,
935 RTE_CACHE_LINE_SIZE);
936 if (fwd_streams == NULL)
937 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
938 " (struct fwd_stream *)) failed\n",
941 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
942 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
943 " struct fwd_stream", sizeof(struct fwd_stream),
944 RTE_CACHE_LINE_SIZE);
945 if (fwd_streams[sm_id] == NULL)
946 rte_exit(EXIT_FAILURE, "rte_zmalloc"
947 "(struct fwd_stream) failed\n");
954 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a histogram summary of burst sizes for one direction
 * (rx_tx is "RX" or "TX"): the total burst count and the share of
 * the two most frequent burst sizes, everything else lumped as
 * "others". NOTE(review): excerpt — some original lines (e.g. the
 * total_burst initialization and early returns) are elided.
 */
956 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
958 unsigned int total_burst;
959 unsigned int nb_burst;
960 unsigned int burst_stats[3];
961 uint16_t pktnb_stats[3];
963 int burst_percent[3];
966 * First compute the total number of packet bursts and the
967 * two highest numbers of bursts of the same number of packets.
970 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
971 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
972 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
973 nb_burst = pbs->pkt_burst_spread[nb_pkt];
976 total_burst += nb_burst;
/* Keep the top-2 burst sizes ordered: [0] is the most frequent. */
977 if (nb_burst > burst_stats[0]) {
978 burst_stats[1] = burst_stats[0];
979 pktnb_stats[1] = pktnb_stats[0];
980 burst_stats[0] = nb_burst;
981 pktnb_stats[0] = nb_pkt;
982 } else if (nb_burst > burst_stats[1]) {
983 burst_stats[1] = nb_burst;
984 pktnb_stats[1] = nb_pkt;
/* Nothing recorded: nothing to print. */
987 if (total_burst == 0)
989 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
990 printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
991 burst_percent[0], (int) pktnb_stats[0]);
/* Case 1: a single burst size accounts for everything. */
992 if (burst_stats[0] == total_burst) {
/* Case 2: the top two burst sizes account for everything. */
996 if (burst_stats[0] + burst_stats[1] == total_burst) {
997 printf(" + %d%% of %d pkts]\n",
998 100 - burst_percent[0], pktnb_stats[1]);
/* Case 3: a remainder exists; fold rounding losses into "others". */
1001 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1002 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1003 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1004 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1007 printf(" + %d%% of %d pkts + %d%% of others]\n",
1008 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1010 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print per-port forwarding statistics: RX/TX packet and drop totals,
 * checksum errors (csum engine only), RX errors/no-mbuf counters, and
 * optionally per-stats-register queue counters when queue-stats
 * mapping is enabled. Two print layouts are used depending on whether
 * mapping is enabled. NOTE(review): excerpt — some original lines are
 * elided.
 */
1013 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
1015 struct rte_port *port;
1018 static const char *fwd_stats_border = "----------------------";
1020 port = &ports[port_id];
1021 printf("\n  %s Forward statistics for port %-2d %s\n",
1022 fwd_stats_border, port_id, fwd_stats_border);
/* Layout 1: no queue-stats mapping on either direction. */
1024 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
1025 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1027 stats->ipackets, stats->imissed,
1028 (uint64_t) (stats->ipackets + stats->imissed));
/* Checksum counters are only meaningful for the csum engine. */
1030 if (cur_fwd_eng == &csum_fwd_engine)
1031 printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
1032 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1033 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1034 printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
1035 printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
1038 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1040 stats->opackets, port->tx_dropped,
1041 (uint64_t) (stats->opackets + port->tx_dropped));
/* Layout 2: queue-stats mapping enabled — right-aligned columns. */
1044 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
1046 stats->ipackets, stats->imissed,
1047 (uint64_t) (stats->ipackets + stats->imissed));
1049 if (cur_fwd_eng == &csum_fwd_engine)
1050 printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
1051 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1052 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1053 printf("  RX-error:%"PRIu64"\n", stats->ierrors);
1054 printf("  RX-nombufs:             %14"PRIu64"\n",
1058 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1060 stats->opackets, port->tx_dropped,
1061 (uint64_t) (stats->opackets + port->tx_dropped));
/* Optional burst-size histograms (compile-time feature). */
1064 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1065 if (port->rx_stream)
1066 pkt_burst_stats_display("RX",
1067 &port->rx_stream->rx_burst_stats);
1068 if (port->tx_stream)
1069 pkt_burst_stats_display("TX",
1070 &port->tx_stream->tx_burst_stats);
/* Per-stats-register queue counters, when mapping is enabled. */
1073 if (port->rx_queue_stats_mapping_enabled) {
1075 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1076 printf("  Stats reg %2d RX-packets:%14"PRIu64
1077 "     RX-errors:%14"PRIu64
1078 "    RX-bytes:%14"PRIu64"\n",
1079 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1083 if (port->tx_queue_stats_mapping_enabled) {
1084 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1085 printf("  Stats reg %2d TX-packets:%14"PRIu64
1086 "                                 TX-bytes:%14"PRIu64"\n",
1087 i, stats->q_opackets[i], stats->q_obytes[i]);
1091 printf("  %s--------------------------------%s\n",
1092 fwd_stats_border, fwd_stats_border);
/*
 * Print statistics for one forwarding stream (RX port/queue ->
 * TX port/queue): packet counts, drops, checksum errors for the csum
 * engine, and optional burst histograms. Streams with all-zero
 * counters are skipped (early return on an elided line).
 * NOTE(review): excerpt — some original lines are elided.
 */
1096 fwd_stream_stats_display(streamid_t stream_id)
1098 struct fwd_stream *fs;
1099 static const char *fwd_top_stats_border = "-------";
1101 fs = fwd_streams[stream_id];
/* Skip completely idle streams. */
1102 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1103 (fs->fwd_dropped == 0))
1105 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1106 "TX Port=%2d/Queue=%2d %s\n",
1107 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1108 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1109 printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1110 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1112 /* if checksum mode */
1113 if (cur_fwd_eng == &csum_fwd_engine) {
1114 printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1115 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
/* Optional burst-size histograms (compile-time feature). */
1118 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1119 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1120 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain all RX queues of all forwarding ports before a run: receive
 * and free packets until a queue is empty or a 1-second timer expires,
 * repeating the whole sweep twice with a 10 ms pause in between.
 * NOTE(review): excerpt — some original lines (the do { opener) are
 * elided.
 */
1125 flush_fwd_rx_queues(void)
1127 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1134 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1135 uint64_t timer_period;
1137 /* convert to number of cycles */
1138 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1140 for (j = 0; j < 2; j++) {
1141 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1142 for (rxq = 0; rxq < nb_rxq; rxq++) {
1143 port_id = fwd_ports_ids[rxp];
1145 * testpmd can stuck in the below do while loop
1146 * if rte_eth_rx_burst() always returns nonzero
1147 * packets. So timer is added to exit this loop
1148 * after 1sec timer expiry.
1150 prev_tsc = rte_rdtsc();
1152 nb_rx = rte_eth_rx_burst(port_id, rxq,
1153 pkts_burst, MAX_PKT_BURST);
/* Free everything received; we only want the queues empty. */
1154 for (i = 0; i < nb_rx; i++)
1155 rte_pktmbuf_free(pkts_burst[i]);
1157 cur_tsc = rte_rdtsc();
1158 diff_tsc = cur_tsc - prev_tsc;
1159 timer_tsc += diff_tsc;
1160 } while ((nb_rx > 0) &&
1161 (timer_tsc < timer_period));
1165 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main forwarding loop for one lcore: repeatedly run pkt_fwd over all
 * streams assigned to this lcore until fc->stopped is set. On the
 * designated lcores this loop also drives the periodic bitrate
 * calculation and latency-stats update.
 * NOTE(review): excerpt — some original lines (the do { opener) are
 * elided.
 */
1170 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1172 struct fwd_stream **fsm;
1175 #ifdef RTE_LIBRTE_BITRATE
1176 uint64_t tics_per_1sec;
1177 uint64_t tics_datum;
1178 uint64_t tics_current;
1181 tics_datum = rte_rdtsc();
1182 tics_per_1sec = rte_get_timer_hz();
/* This lcore owns the streams [stream_idx, stream_idx + stream_nb). */
1184 fsm = &fwd_streams[fc->stream_idx];
1185 nb_fs = fc->stream_nb;
1187 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1188 (*pkt_fwd)(fsm[sm_id]);
1189 #ifdef RTE_LIBRTE_BITRATE
/* Bitrate stats: computed once per second by one designated lcore. */
1190 if (bitrate_enabled != 0 &&
1191 bitrate_lcore_id == rte_lcore_id()) {
1192 tics_current = rte_rdtsc();
1193 if (tics_current - tics_datum >= tics_per_1sec) {
1194 /* Periodic bitrate calculation */
1195 RTE_ETH_FOREACH_DEV(idx_port)
1196 rte_stats_bitrate_calc(bitrate_data,
1198 tics_datum = tics_current;
1202 #ifdef RTE_LIBRTE_LATENCY_STATS
/* Latency stats: updated by one designated lcore. */
1203 if (latencystats_enabled != 0 &&
1204 latencystats_lcore_id == rte_lcore_id())
1205 rte_latencystats_update();
1208 } while (! fc->stopped);
/* lcore entry point: run the currently configured forwarding engine
 * on this lcore's streams until stopped. */
1212 start_pkt_forward_on_core(void *fwd_arg)
1214 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1215 cur_fwd_config.fwd_eng->packet_fwd);
1220 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1221 * Used to start communication flows in network loopback test configurations.
1224 run_one_txonly_burst_on_core(void *fwd_arg)
1226 struct fwd_lcore *fwd_lc;
1227 struct fwd_lcore tmp_lcore;
1229 fwd_lc = (struct fwd_lcore *) fwd_arg;
/* Copy the lcore context and pre-set stopped=1 so the forwarding loop
 * in run_pkt_fwd_on_lcore() executes exactly one iteration (a single
 * TXONLY burst), leaving the real context untouched. */
1230 tmp_lcore = *fwd_lc;
1231 tmp_lcore.stopped = 1;
1232 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1237 * Launch packet forwarding:
1238 * - Setup per-port forwarding context.
1239 * - launch logical cores with their forwarding configuration.
/*
 * Launch forwarding (see header comment above): call the engine's
 * per-port begin hook, then remote-launch pkt_fwd_on_lcore on every
 * forwarding lcore. In interactive mode the lcore running the command
 * line is skipped. NOTE(review): excerpt — some original lines are
 * elided.
 */
1242 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1244 port_fwd_begin_t port_fwd_begin;
/* Optional per-engine port setup hook. */
1249 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1250 if (port_fwd_begin != NULL) {
1251 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1252 (*port_fwd_begin)(fwd_ports_ids[i]);
1254 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1255 lc_id = fwd_lcores_cpuids[i];
/* Don't launch on the lcore driving the interactive command line. */
1256 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1257 fwd_lcores[i]->stopped = 0;
1258 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1259 fwd_lcores[i], lc_id);
1261 printf("launch lcore %u failed - diag=%d\n",
1268 * Update the forward ports list.
/*
 * Rebuild fwd_ports_ids[] keeping only still-valid port ids, and
 * append new_pid when it is a real port id (callers pass
 * RTE_MAX_ETHPORTS to mean "no port to add"). Updates nb_fwd_ports
 * and nb_cfg_ports. NOTE(review): excerpt — some lines are elided.
 */
1271 update_fwd_ports(portid_t new_pid)
1274 unsigned int new_nb_fwd_ports = 0;
/* Compact the list in place, dropping invalid ports. */
1277 for (i = 0; i < nb_fwd_ports; ++i) {
1278 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1281 fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1285 if (new_pid < RTE_MAX_ETHPORTS)
1286 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1288 nb_fwd_ports = new_nb_fwd_ports;
1289 nb_cfg_ports = new_nb_fwd_ports;
1293  * Launch packet forwarding configuration.
1296 start_packet_forwarding(int with_tx_first)
1298 port_fwd_begin_t port_fwd_begin;
1299 port_fwd_end_t port_fwd_end;
1300 struct rte_port *port;
/* Sanity-check queue counts against the selected forwarding mode. */
1305 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1306 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1308 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1309 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1311 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1312 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1313 (!nb_rxq || !nb_txq))
1314 rte_exit(EXIT_FAILURE,
1315 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1316 cur_fwd_eng->fwd_mode_name);
1318 if (all_ports_started() == 0) {
1319 printf("Not all ports were started\n");
1322 if (test_done == 0) {
1323 printf("Packet forwarding already started\n");
/* In DCB mode every fwd port must be DCB-configured and >1 core is needed. */
1329 for (i = 0; i < nb_fwd_ports; i++) {
1330 pt_id = fwd_ports_ids[i];
1331 port = &ports[pt_id];
1332 if (!port->dcb_flag) {
1333 printf("In DCB mode, all forwarding ports must "
1334 "be configured in this mode.\n");
1338 if (nb_fwd_lcores == 1) {
1339 printf("In DCB mode,the nb forwarding cores "
1340 "should be larger than 1.\n");
1349 flush_fwd_rx_queues();
1351 pkt_fwd_config_display(&cur_fwd_config);
1352 rxtx_config_display();
/* Snapshot current HW stats so later deltas measure this run only. */
1354 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1355 pt_id = fwd_ports_ids[i];
1356 port = &ports[pt_id];
1357 rte_eth_stats_get(pt_id, &port->stats);
1358 port->tx_dropped = 0;
1360 map_port_queue_stats_mapping_registers(pt_id, port);
/* Reset all per-stream software counters. */
1362 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1363 fwd_streams[sm_id]->rx_packets = 0;
1364 fwd_streams[sm_id]->tx_packets = 0;
1365 fwd_streams[sm_id]->fwd_dropped = 0;
1366 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1367 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1369 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1370 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1371 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1372 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1373 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1375 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1376 fwd_streams[sm_id]->core_cycles = 0;
/*
 * tx_first: run the tx_only engine for with_tx_first rounds before
 * starting the regular forwarding loop, waiting for all lcores to
 * finish each round.
 */
1379 if (with_tx_first) {
1380 port_fwd_begin = tx_only_engine.port_fwd_begin;
1381 if (port_fwd_begin != NULL) {
1382 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1383 (*port_fwd_begin)(fwd_ports_ids[i]);
1385 while (with_tx_first--) {
1386 launch_packet_forwarding(
1387 run_one_txonly_burst_on_core);
1388 rte_eal_mp_wait_lcore();
1390 port_fwd_end = tx_only_engine.port_fwd_end;
1391 if (port_fwd_end != NULL) {
1392 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1393 (*port_fwd_end)(fwd_ports_ids[i]);
1396 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop packet forwarding: signal all forwarding lcores to stop, wait for
 * them, run the engine's per-port end hook, accumulate per-stream counters
 * into the ports, then print per-port and aggregated statistics.
 */
1400 stop_packet_forwarding(void)
1402 struct rte_eth_stats stats;
1403 struct rte_port *port;
1404 port_fwd_end_t port_fwd_end;
1409 uint64_t total_recv;
1410 uint64_t total_xmit;
1411 uint64_t total_rx_dropped;
1412 uint64_t total_tx_dropped;
1413 uint64_t total_rx_nombuf;
1414 uint64_t tx_dropped;
1415 uint64_t rx_bad_ip_csum;
1416 uint64_t rx_bad_l4_csum;
1417 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1418 uint64_t fwd_cycles;
1421 static const char *acc_stats_border = "+++++++++++++++";
1424 printf("Packet forwarding not started\n");
/* Cooperative stop: lcores poll their 'stopped' flag. */
1427 printf("Telling cores to stop...");
1428 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1429 fwd_lcores[lc_id]->stopped = 1;
1430 printf("\nWaiting for lcores to finish...\n");
1431 rte_eal_mp_wait_lcore();
1432 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1433 if (port_fwd_end != NULL) {
1434 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1435 pt_id = fwd_ports_ids[i];
1436 (*port_fwd_end)(pt_id);
1439 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/*
 * Fold each stream's software counters into its rx/tx ports. Per-stream
 * stats are only displayed when there are more streams than ports.
 */
1442 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1443 if (cur_fwd_config.nb_fwd_streams >
1444 cur_fwd_config.nb_fwd_ports) {
1445 fwd_stream_stats_display(sm_id);
1446 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1447 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1449 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1451 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1454 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1455 tx_dropped = (uint64_t) (tx_dropped +
1456 fwd_streams[sm_id]->fwd_dropped);
1457 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1460 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1461 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1462 fwd_streams[sm_id]->rx_bad_ip_csum);
1463 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1467 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1468 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1469 fwd_streams[sm_id]->rx_bad_l4_csum);
1470 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1473 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1474 fwd_cycles = (uint64_t) (fwd_cycles +
1475 fwd_streams[sm_id]->core_cycles);
1480 total_rx_dropped = 0;
1481 total_tx_dropped = 0;
1482 total_rx_nombuf = 0;
/*
 * Compute per-port deltas against the snapshot taken in
 * start_packet_forwarding(), then clear the snapshot.
 */
1483 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1484 pt_id = fwd_ports_ids[i];
1486 port = &ports[pt_id];
1487 rte_eth_stats_get(pt_id, &stats);
1488 stats.ipackets -= port->stats.ipackets;
1489 port->stats.ipackets = 0;
1490 stats.opackets -= port->stats.opackets;
1491 port->stats.opackets = 0;
1492 stats.ibytes -= port->stats.ibytes;
1493 port->stats.ibytes = 0;
1494 stats.obytes -= port->stats.obytes;
1495 port->stats.obytes = 0;
1496 stats.imissed -= port->stats.imissed;
1497 port->stats.imissed = 0;
1498 stats.oerrors -= port->stats.oerrors;
1499 port->stats.oerrors = 0;
1500 stats.rx_nombuf -= port->stats.rx_nombuf;
1501 port->stats.rx_nombuf = 0;
1503 total_recv += stats.ipackets;
1504 total_xmit += stats.opackets;
1505 total_rx_dropped += stats.imissed;
1506 total_tx_dropped += port->tx_dropped;
1507 total_rx_nombuf += stats.rx_nombuf;
1509 fwd_port_stats_display(pt_id, &stats);
/* Aggregated totals over all forwarding ports. */
1512 printf("\n %s Accumulated forward statistics for all ports"
1514 acc_stats_border, acc_stats_border);
1515 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1517 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1519 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1520 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1521 if (total_rx_nombuf > 0)
1522 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1523 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1525 acc_stats_border, acc_stats_border);
1526 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1528 printf("\n CPU cycles/packet=%u (total cycles="
1529 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1530 (unsigned int)(fwd_cycles / total_recv),
1531 fwd_cycles, total_recv);
1533 printf("\nDone.\n");
/* Bring the link of port @pid administratively up; log on failure. */
1538 dev_set_link_up(portid_t pid)
1540 if (rte_eth_dev_set_link_up(pid) < 0)
1541 printf("\nSet link up fail.\n");
/* Bring the link of port @pid administratively down; log on failure. */
1545 dev_set_link_down(portid_t pid)
1547 if (rte_eth_dev_set_link_down(pid) < 0)
1548 printf("\nSet link down fail.\n");
/*
 * Return whether every non-slave ethdev port is in RTE_PORT_STARTED state.
 * Bonding-slave ports are ignored because they are managed by their master.
 */
1552 all_ports_started(void)
1555 struct rte_port *port;
1557 RTE_ETH_FOREACH_DEV(pi) {
1559 /* Check if there is a port which is not started */
1560 if ((port->port_status != RTE_PORT_STARTED) &&
1561 (port->slave_flag == 0))
1565 /* No port is not started */
/* Return whether port @port_id is stopped (bonding slaves count as stopped). */
1570 port_is_stopped(portid_t port_id)
1572 struct rte_port *port = &ports[port_id];
1574 if ((port->port_status != RTE_PORT_STOPPED) &&
1575 (port->slave_flag == 0))
/* Return whether every ethdev port satisfies port_is_stopped(). */
1581 all_ports_stopped(void)
1585 RTE_ETH_FOREACH_DEV(pi) {
1586 if (!port_is_stopped(pi))
/* Return whether @port_id is valid and in RTE_PORT_STARTED state. */
1594 port_is_started(portid_t port_id)
1596 if (port_id_is_invalid(port_id, ENABLED_WARN))
1599 if (ports[port_id].port_status != RTE_PORT_STARTED)
/* Return whether @port_id is valid and in RTE_PORT_CLOSED state. */
1606 port_is_closed(portid_t port_id)
1608 if (port_id_is_invalid(port_id, ENABLED_WARN))
1611 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * Start port @pid, or all ports when @pid == RTE_PORT_ALL:
 * (re)configure the device and its rx/tx queues if flagged, start it,
 * register ethdev event callbacks, and optionally check link status.
 * Port state transitions are guarded with atomic compare-and-set on
 * port_status (STOPPED -> HANDLING -> STARTED, rolled back on failure).
 */
1618 start_port(portid_t pid)
1620 int diag, need_check_link_status = -1;
1623 struct rte_port *port;
1624 struct ether_addr mac_addr;
1625 enum rte_eth_event_type event_type;
1627 if (port_id_is_invalid(pid, ENABLED_WARN))
1632 RTE_ETH_FOREACH_DEV(pi) {
1633 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1636 need_check_link_status = 0;
/* Claim the port: only a STOPPED port may be started. */
1638 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1639 RTE_PORT_HANDLING) == 0) {
1640 printf("Port %d is now not stopped\n", pi);
1644 if (port->need_reconfig > 0) {
1645 port->need_reconfig = 0;
1647 if (flow_isolate_all) {
1648 int ret = port_flow_isolate(pi, 1);
1650 printf("Failed to apply isolated"
1651 " mode on port %d\n", pi);
1656 printf("Configuring Port %d (socket %u)\n", pi,
1658 /* configure port */
1659 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* On configure failure, roll back to STOPPED and retry next time. */
1662 if (rte_atomic16_cmpset(&(port->port_status),
1663 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1664 printf("Port %d can not be set back "
1665 "to stopped\n", pi);
1666 printf("Fail to configure port %d\n", pi);
1667 /* try to reconfigure port next time */
1668 port->need_reconfig = 1;
1672 if (port->need_reconfig_queues > 0) {
1673 port->need_reconfig_queues = 0;
1674 /* setup tx queues */
1675 for (qi = 0; qi < nb_txq; qi++) {
/* Prefer the per-ring NUMA socket when configured, else the port's. */
1676 if ((numa_support) &&
1677 (txring_numa[pi] != NUMA_NO_CONFIG))
1678 diag = rte_eth_tx_queue_setup(pi, qi,
1679 port->nb_tx_desc[qi],
1681 &(port->tx_conf[qi]));
1683 diag = rte_eth_tx_queue_setup(pi, qi,
1684 port->nb_tx_desc[qi],
1686 &(port->tx_conf[qi]));
1691 /* Fail to setup tx queue, return */
1692 if (rte_atomic16_cmpset(&(port->port_status),
1694 RTE_PORT_STOPPED) == 0)
1695 printf("Port %d can not be set back "
1696 "to stopped\n", pi);
1697 printf("Fail to configure port %d tx queues\n",
1699 /* try to reconfigure queues next time */
1700 port->need_reconfig_queues = 1;
1703 for (qi = 0; qi < nb_rxq; qi++) {
1704 /* setup rx queues */
1705 if ((numa_support) &&
1706 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1707 struct rte_mempool * mp =
1708 mbuf_pool_find(rxring_numa[pi]);
1710 printf("Failed to setup RX queue:"
1711 "No mempool allocation"
1712 " on the socket %d\n",
1717 diag = rte_eth_rx_queue_setup(pi, qi,
1718 port->nb_rx_desc[qi],
1720 &(port->rx_conf[qi]),
1723 struct rte_mempool *mp =
1724 mbuf_pool_find(port->socket_id);
1726 printf("Failed to setup RX queue:"
1727 "No mempool allocation"
1728 " on the socket %d\n",
1732 diag = rte_eth_rx_queue_setup(pi, qi,
1733 port->nb_rx_desc[qi],
1735 &(port->rx_conf[qi]),
1741 /* Fail to setup rx queue, return */
1742 if (rte_atomic16_cmpset(&(port->port_status),
1744 RTE_PORT_STOPPED) == 0)
1745 printf("Port %d can not be set back "
1746 "to stopped\n", pi);
1747 printf("Fail to configure port %d rx queues\n",
1749 /* try to reconfigure queues next time */
1750 port->need_reconfig_queues = 1;
1756 if (rte_eth_dev_start(pi) < 0) {
1757 printf("Fail to start port %d\n", pi);
1759 /* Fail to setup rx queue, return */
1760 if (rte_atomic16_cmpset(&(port->port_status),
1761 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1762 printf("Port %d can not be set back to "
1767 if (rte_atomic16_cmpset(&(port->port_status),
1768 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1769 printf("Port %d can not be set into started\n", pi);
1771 rte_eth_macaddr_get(pi, &mac_addr);
1772 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1773 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1774 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1775 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1777 /* at least one port started, need checking link status */
1778 need_check_link_status = 1;
/* Register a callback for every ethdev event type on all ports. */
1781 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1782 event_type < RTE_ETH_EVENT_MAX;
1784 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1789 printf("Failed to setup even callback for event %d\n",
1795 if (need_check_link_status == 1 && !no_link_check)
1796 check_all_ports_link_status(RTE_PORT_ALL);
1797 else if (need_check_link_status == 0)
1798 printf("Please stop the ports first\n");
/*
 * Stop port @pid (or all ports when RTE_PORT_ALL). Ports still in the
 * forwarding configuration or acting as bonding slaves are refused.
 * State: STARTED -> HANDLING -> STOPPED via atomic compare-and-set.
 */
1805 stop_port(portid_t pid)
1808 struct rte_port *port;
1809 int need_check_link_status = 0;
1816 if (port_id_is_invalid(pid, ENABLED_WARN))
1819 printf("Stopping ports...\n");
1821 RTE_ETH_FOREACH_DEV(pi) {
1822 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1825 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1826 printf("Please remove port %d from forwarding configuration.\n", pi);
1830 if (port_is_bonding_slave(pi)) {
1831 printf("Please remove port %d from bonded device.\n", pi);
1836 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1837 RTE_PORT_HANDLING) == 0)
1840 rte_eth_dev_stop(pi);
1842 if (rte_atomic16_cmpset(&(port->port_status),
1843 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1844 printf("Port %d can not be set into stopped\n", pi);
1845 need_check_link_status = 1;
1847 if (need_check_link_status && !no_link_check)
1848 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Close port @pid (or all ports when RTE_PORT_ALL): flush its flow rules
 * and call rte_eth_dev_close(). Only a STOPPED port may be closed; ports
 * in the forwarding config or acting as bonding slaves are refused.
 */
1854 close_port(portid_t pid)
1857 struct rte_port *port;
1859 if (port_id_is_invalid(pid, ENABLED_WARN))
1862 printf("Closing ports...\n");
1864 RTE_ETH_FOREACH_DEV(pi) {
1865 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1868 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1869 printf("Please remove port %d from forwarding configuration.\n", pi);
1873 if (port_is_bonding_slave(pi)) {
1874 printf("Please remove port %d from bonded device.\n", pi);
/* CLOSED -> CLOSED cmpset is a race-free "already closed" test. */
1879 if (rte_atomic16_cmpset(&(port->port_status),
1880 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1881 printf("Port %d is already closed\n", pi);
1885 if (rte_atomic16_cmpset(&(port->port_status),
1886 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1887 printf("Port %d is now not stopped\n", pi);
1891 if (port->flow_list)
1892 port_flow_flush(pi);
1893 rte_eth_dev_close(pi);
1895 if (rte_atomic16_cmpset(&(port->port_status),
1896 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1897 printf("Port %d cannot be set to closed\n", pi);
/*
 * Reset port @pid (or all ports when RTE_PORT_ALL) via rte_eth_dev_reset().
 * On success the port is flagged for full reconfiguration on next start.
 */
1904 reset_port(portid_t pid)
1908 struct rte_port *port;
1910 if (port_id_is_invalid(pid, ENABLED_WARN))
1913 printf("Resetting ports...\n");
1915 RTE_ETH_FOREACH_DEV(pi) {
1916 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1919 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1920 printf("Please remove port %d from forwarding "
1921 "configuration.\n", pi);
1925 if (port_is_bonding_slave(pi)) {
1926 printf("Please remove port %d from bonded device.\n",
1931 diag = rte_eth_dev_reset(pi);
1934 port->need_reconfig = 1;
1935 port->need_reconfig_queues = 1;
1937 printf("Failed to reset port %d. diag=%d\n", pi, diag);
/* Register eth_dev_event_callback() for hotplug events on all devices. */
1945 eth_dev_event_callback_register(void)
1949 /* register the device event callback */
1950 ret = rte_dev_event_callback_register(NULL,
1951 eth_dev_event_callback, NULL);
1953 printf("Failed to register device event callback\n");
/* Unregister the hotplug device event callback registered above. */
1962 eth_dev_event_callback_unregister(void)
1966 /* unregister the device event callback */
1967 ret = rte_dev_event_callback_unregister(NULL,
1968 eth_dev_event_callback, NULL);
1970 printf("Failed to unregister device event callback\n");
/*
 * Hot-plug a new port described by @identifier (PCI address or virtual
 * device name): attach it, reconfigure it on its NUMA socket, enable
 * promiscuous mode, and add it to the forwarding port list.
 */
1978 attach_port(char *identifier)
1981 unsigned int socket_id;
1983 printf("Attaching a new port...\n");
1985 if (identifier == NULL) {
1986 printf("Invalid parameters are specified\n");
1990 if (rte_eth_dev_attach(identifier, &pi))
1993 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1994 /* if socket_id is invalid, set to 0 */
1995 if (check_socket_id(socket_id) < 0)
1997 reconfig(pi, socket_id);
1998 rte_eth_promiscuous_enable(pi);
2000 nb_ports = rte_eth_dev_count_avail();
2002 ports[pi].port_status = RTE_PORT_STOPPED;
2004 update_fwd_ports(pi);
2006 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Hot-unplug port @port_id. The port must already be closed; its flow
 * rules are flushed first and the forwarding port list is refreshed
 * (RTE_MAX_ETHPORTS = no port to append).
 */
2011 detach_port(portid_t port_id)
2013 char name[RTE_ETH_NAME_MAX_LEN];
2015 printf("Detaching a port...\n");
2017 if (!port_is_closed(port_id)) {
2018 printf("Please close port first\n");
2022 if (ports[port_id].flow_list)
2023 port_flow_flush(port_id);
2025 if (rte_eth_dev_detach(port_id, name)) {
2026 TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2030 nb_ports = rte_eth_dev_count_avail();
2032 update_fwd_ports(RTE_MAX_ETHPORTS);
2034 printf("Port %u is detached. Now total ports is %d\n",
/*
 * NOTE(review): interior of the application exit path (presumably
 * pmd_test_exit() — its signature is outside this view; confirm in the
 * full file). Stops forwarding, shuts down every port, stops the hotplug
 * event monitor and unregisters its callbacks.
 */
2043 struct rte_device *device;
2048 stop_packet_forwarding();
2050 if (ports != NULL) {
2052 RTE_ETH_FOREACH_DEV(pt_id) {
2053 printf("\nShutting down port %d...\n", pt_id);
2059 * This is a workaround to fix a virtio-user issue that
2060 * requires to call clean-up routine to remove existing
2062 * This workaround valid only for testpmd, needs a fix
2063 * valid for all applications.
2064 * TODO: Implement proper resource cleanup
2066 device = rte_eth_devices[pt_id].device;
2067 if (device && !strcmp(device->driver->name, "net_virtio_user"))
2073 ret = rte_dev_event_monitor_stop();
2076 "fail to stop device event monitor.");
2078 ret = eth_dev_event_callback_unregister();
2081 "fail to unregister all event callbacks.");
2084 printf("\nBye...\n");
/* Entry of the non-interactive test menu: command name + handler. */
2087 typedef void (*cmd_func_t)(void);
2088 struct pmd_test_command {
2089 const char *cmd_name;
2090 cmd_func_t cmd_func;
/* Number of entries in the pmd_test_menu table. */
2093 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2095 /* Check the link status of all ports in up to 9s, and print them finally */
2097 check_all_ports_link_status(uint32_t port_mask)
2099 #define CHECK_INTERVAL 100 /* 100ms */
2100 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2102 uint8_t count, all_ports_up, print_flag = 0;
2103 struct rte_eth_link link;
2105 printf("Checking link statuses...\n");
/* Poll every CHECK_INTERVAL ms until all masked ports are up or timeout. */
2107 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2109 RTE_ETH_FOREACH_DEV(portid) {
2110 if ((port_mask & (1 << portid)) == 0)
2112 memset(&link, 0, sizeof(link));
2113 rte_eth_link_get_nowait(portid, &link);
2114 /* print link status if flag set */
2115 if (print_flag == 1) {
2116 if (link.link_status)
2118 "Port%d Link Up. speed %u Mbps- %s\n",
2119 portid, link.link_speed,
2120 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2121 ("full-duplex") : ("half-duplex\n"));
2123 printf("Port %d Link Down\n", portid);
2126 /* clear all_ports_up flag if any link down */
2127 if (link.link_status == ETH_LINK_DOWN) {
2132 /* after finally printing all link status, get out */
2133 if (print_flag == 1)
2136 if (all_ports_up == 0) {
2138 rte_delay_ms(CHECK_INTERVAL);
2141 /* set the print_flag if all ports up or timeout */
2142 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred handler (scheduled via rte_eal_alarm_set) for a device removal
 * event: stop forwarding if the removed port was in use, close and detach
 * it with link checks temporarily disabled, then resume forwarding.
 * @arg carries the port id cast through intptr_t.
 */
2152 rmv_event_callback(void *arg)
2154 int need_to_start = 0;
2155 int org_no_link_check = no_link_check;
2156 portid_t port_id = (intptr_t)arg;
2158 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2160 if (!test_done && port_is_forwarding(port_id)) {
2162 stop_packet_forwarding();
2166 no_link_check = org_no_link_check;
2167 close_port(port_id);
2168 detach_port(port_id);
2170 start_packet_forwarding(0);
2173 /* This function is used by the interrupt thread */
2175 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
/* Human-readable names for each ethdev event type, indexed by the enum. */
2178 static const char * const event_desc[] = {
2179 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2180 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2181 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2182 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2183 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2184 [RTE_ETH_EVENT_IPSEC] = "IPsec",
2185 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2186 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2187 [RTE_ETH_EVENT_NEW] = "device probed",
2188 [RTE_ETH_EVENT_DESTROY] = "device released",
2189 [RTE_ETH_EVENT_MAX] = NULL,
2192 RTE_SET_USED(param);
2193 RTE_SET_USED(ret_param);
2195 if (type >= RTE_ETH_EVENT_MAX) {
2196 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
2197 port_id, __func__, type);
2199 } else if (event_print_mask & (UINT32_C(1) << type)) {
2200 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2205 if (port_id_is_invalid(port_id, DISABLED_WARN))
2209 case RTE_ETH_EVENT_INTR_RMV:
/* Defer removal handling out of the interrupt thread (100 ms alarm). */
2210 if (rte_eal_alarm_set(100000,
2211 rmv_event_callback, (void *)(intptr_t)port_id))
2212 fprintf(stderr, "Could not set up deferred device removal\n");
2220 /* This function is used by the interrupt thread */
2222 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2223 __rte_unused void *arg)
2225 if (type >= RTE_DEV_EVENT_MAX) {
2226 fprintf(stderr, "%s called upon invalid event %d\n",
/* Hotplug add/remove are currently only logged; see TODOs below. */
2232 case RTE_DEV_EVENT_REMOVE:
2233 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2235 /* TODO: After finish failure handle, begin to stop
2236 * packet forward, stop port, close port, detach port.
2239 case RTE_DEV_EVENT_ADD:
2240 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2242 /* TODO: After finish kernel driver binding,
2243 * begin to attach port.
/*
 * Apply the user-supplied TX queue -> stats-counter mappings for @port_id
 * and flag the port when at least one mapping was programmed.
 */
2252 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2256 uint8_t mapping_found = 0;
2258 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2259 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2260 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2261 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2262 tx_queue_stats_mappings[i].queue_id,
2263 tx_queue_stats_mappings[i].stats_counter_id);
2270 port->tx_queue_stats_mapping_enabled = 1;
/*
 * Apply the user-supplied RX queue -> stats-counter mappings for @port_id
 * and flag the port when at least one mapping was programmed.
 */
2275 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2279 uint8_t mapping_found = 0;
2281 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2282 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2283 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2284 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2285 rx_queue_stats_mappings[i].queue_id,
2286 rx_queue_stats_mappings[i].stats_counter_id);
2293 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Program both TX and RX queue stats mappings for port @pi.
 * -ENOTSUP from the PMD is tolerated (mapping disabled); any other
 * failure aborts the application.
 */
2298 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2302 diag = set_tx_queue_stats_mapping_registers(pi, port);
2304 if (diag == -ENOTSUP) {
2305 port->tx_queue_stats_mapping_enabled = 0;
2306 printf("TX queue stats mapping not supported port id=%d\n", pi);
2309 rte_exit(EXIT_FAILURE,
2310 "set_tx_queue_stats_mapping_registers "
2311 "failed for port id=%d diag=%d\n",
2315 diag = set_rx_queue_stats_mapping_registers(pi, port);
2317 if (diag == -ENOTSUP) {
2318 port->rx_queue_stats_mapping_enabled = 0;
2319 printf("RX queue stats mapping not supported port id=%d\n", pi);
2322 rte_exit(EXIT_FAILURE,
2323 "set_rx_queue_stats_mapping_registers "
2324 "failed for port id=%d diag=%d\n",
/*
 * Initialize per-queue RX/TX configuration for @port from the PMD's
 * defaults, then override each threshold with any value the user passed
 * on the command line (RTE_PMD_PARAM_UNSET means "keep PMD default").
 */
2330 rxtx_port_config(struct rte_port *port)
2334 for (qid = 0; qid < nb_rxq; qid++) {
2335 port->rx_conf[qid] = port->dev_info.default_rxconf;
2337 /* Check if any Rx parameters have been passed */
2338 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2339 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2341 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2342 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2344 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2345 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2347 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2348 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2350 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2351 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2353 port->nb_rx_desc[qid] = nb_rxd;
2356 for (qid = 0; qid < nb_txq; qid++) {
2357 port->tx_conf[qid] = port->dev_info.default_txconf;
2359 /* Check if any Tx parameters have been passed */
2360 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2361 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2363 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2364 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2366 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2367 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2369 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2370 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2372 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2373 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2375 port->nb_tx_desc[qid] = nb_txd;
/*
 * Build the default configuration of every probed port: flow director,
 * RSS (masked by the PMD's supported offloads), per-queue rx/tx config,
 * MAC address, stats mappings, and LSC/RMV interrupt flags when both
 * requested and supported by the device.
 */
2380 init_port_config(void)
2383 struct rte_port *port;
2385 RTE_ETH_FOREACH_DEV(pid) {
2387 port->dev_conf.fdir_conf = fdir_conf;
2388 rte_eth_dev_info_get(pid, &port->dev_info);
2390 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
/* Only request RSS hash types the PMD actually supports. */
2391 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2392 rss_hf & port->dev_info.flow_type_rss_offloads;
2394 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2395 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* Non-DCB ports: enable RSS mq mode only when a hash function is set. */
2398 if (port->dcb_flag == 0) {
2399 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2400 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2402 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2405 rxtx_port_config(port);
2407 rte_eth_macaddr_get(pid, &port->eth_addr);
2409 map_port_queue_stats_mapping_registers(pid, port);
2410 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2411 rte_pmd_ixgbe_bypass_init(pid);
2414 if (lsc_interrupt &&
2415 (rte_eth_devices[pid].data->dev_flags &
2416 RTE_ETH_DEV_INTR_LSC))
2417 port->dev_conf.intr_conf.lsc = 1;
2418 if (rmv_interrupt &&
2419 (rte_eth_devices[pid].data->dev_flags &
2420 RTE_ETH_DEV_INTR_RMV))
2421 port->dev_conf.intr_conf.rmv = 1;
/* Mark port @slave_pid as a bonding slave (excluded from port checks). */
2425 void set_port_slave_flag(portid_t slave_pid)
2427 struct rte_port *port;
2429 port = &ports[slave_pid];
2430 port->slave_flag = 1;
/* Clear the bonding-slave mark from port @slave_pid. */
2433 void clear_port_slave_flag(portid_t slave_pid)
2435 struct rte_port *port;
2437 port = &ports[slave_pid];
2438 port->slave_flag = 0;
/*
 * Return whether port @slave_pid is a bonding slave, either according to
 * the ethdev BONDED_SLAVE device flag or testpmd's own slave_flag.
 */
2441 uint8_t port_is_bonding_slave(portid_t slave_pid)
2443 struct rte_port *port;
2445 port = &ports[slave_pid];
2446 if ((rte_eth_devices[slave_pid].data->dev_flags &
2447 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
/* VLAN tags used to populate the VMDQ+DCB pool map in get_eth_dcb_conf(). */
2452 const uint16_t vlan_tags[] = {
2453 0, 1, 2, 3, 4, 5, 6, 7,
2454 8, 9, 10, 11, 12, 13, 14, 15,
2455 16, 17, 18, 19, 20, 21, 22, 23,
2456 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill @eth_conf for DCB operation. Two layouts are supported:
 * - DCB_VT_ENABLED: VMDQ+DCB, pools mapped from the vlan_tags[] table
 *   (32 pools for 4 TCs, 16 pools for 8 TCs);
 * - plain DCB combined with RSS otherwise.
 * PFC support is advertised in dcb_capability_en when @pfc_en is set.
 */
2460 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2461 enum dcb_mode_enable dcb_mode,
2462 enum rte_eth_nb_tcs num_tcs,
2468  * Builds up the correct configuration for dcb+vt based on the vlan tags array
2469  * given above, and the number of traffic classes available for use.
2471 if (dcb_mode == DCB_VT_ENABLED) {
2472 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2473 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2474 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2475 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2477 /* VMDQ+DCB RX and TX configurations */
2478 vmdq_rx_conf->enable_default_pool = 0;
2479 vmdq_rx_conf->default_pool = 0;
2480 vmdq_rx_conf->nb_queue_pools =
2481 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2482 vmdq_tx_conf->nb_queue_pools =
2483 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* Map each VLAN tag to one pool, round-robin over the pools. */
2485 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2486 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2487 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2488 vmdq_rx_conf->pool_map[i].pools =
2489 1 << (i % vmdq_rx_conf->nb_queue_pools);
2491 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2492 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2493 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2496 /* set DCB mode of RX and TX of multiple queues */
2497 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2498 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2500 struct rte_eth_dcb_rx_conf *rx_conf =
2501 &eth_conf->rx_adv_conf.dcb_rx_conf;
2502 struct rte_eth_dcb_tx_conf *tx_conf =
2503 &eth_conf->tx_adv_conf.dcb_tx_conf;
2505 rx_conf->nb_tcs = num_tcs;
2506 tx_conf->nb_tcs = num_tcs;
/* Spread the user priorities evenly over the traffic classes. */
2508 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2509 rx_conf->dcb_tc[i] = i % num_tcs;
2510 tx_conf->dcb_tc[i] = i % num_tcs;
2512 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2513 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2514 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2518 eth_conf->dcb_capability_en =
2519 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2521 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * Reconfigure port @pid for DCB operation: build the DCB rte_eth_conf,
 * reapply it to the device, derive the global nb_rxq/nb_txq queue counts
 * from the device capabilities, program VLAN filters for the vlan_tags[]
 * table, and mark the port with dcb_flag.
 */
2527 init_port_dcb_config(portid_t pid,
2528 enum dcb_mode_enable dcb_mode,
2529 enum rte_eth_nb_tcs num_tcs,
2532 struct rte_eth_conf port_conf;
2533 struct rte_port *rte_port;
2537 rte_port = &ports[pid];
2539 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2540 /* Enter DCB configuration status */
/* Preserve the current rx/tx mode settings as the starting point. */
2543 port_conf.rxmode = rte_port->dev_conf.rxmode;
2544 port_conf.txmode = rte_port->dev_conf.txmode;
2546 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2547 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2550 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
/* NOTE(review): nb_rxq is passed for both rx and tx queue counts here —
 * presumably intentional since DCB uses symmetric queues; confirm. */
2552 /* re-configure the device . */
2553 rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
2555 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2557 /* If dev_info.vmdq_pool_base is greater than 0,
2558  * the queue id of vmdq pools is started after pf queues.
2560 if (dcb_mode == DCB_VT_ENABLED &&
2561 rte_port->dev_info.vmdq_pool_base > 0) {
2562 printf("VMDQ_DCB multi-queue mode is nonsensical"
2563 " for port %d.", pid);
2567 /* Assume the ports in testpmd have the same dcb capability
2568  * and has the same number of rxq and txq in dcb mode
2570 if (dcb_mode == DCB_VT_ENABLED) {
2571 if (rte_port->dev_info.max_vfs > 0) {
2572 nb_rxq = rte_port->dev_info.nb_rx_queues;
2573 nb_txq = rte_port->dev_info.nb_tx_queues;
2575 nb_rxq = rte_port->dev_info.max_rx_queues;
2576 nb_txq = rte_port->dev_info.max_tx_queues;
2579 /*if vt is disabled, use all pf queues */
2580 if (rte_port->dev_info.vmdq_pool_base == 0) {
2581 nb_rxq = rte_port->dev_info.max_rx_queues;
2582 nb_txq = rte_port->dev_info.max_tx_queues;
2584 nb_rxq = (queueid_t)num_tcs;
2585 nb_txq = (queueid_t)num_tcs;
2589 rx_free_thresh = 64;
2591 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2593 rxtx_port_config(rte_port);
2595 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
/* Accept every VLAN id used by the VMDQ+DCB pool mapping. */
2596 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2597 rx_vft_set(pid, vlan_tags[i], 1);
2599 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2600 map_port_queue_stats_mapping_registers(pid, rte_port);
2602 rte_port->dcb_flag = 1;
/*
 * NOTE(review): interior of the port-array initializer (presumably
 * init_port() — its signature is outside this view; confirm in the full
 * file). Allocates the zeroed, cache-aligned global ports[] array.
 */
2610 /* Configuration of Ethernet ports. */
2611 ports = rte_zmalloc("testpmd: ports",
2612 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2613 RTE_CACHE_LINE_SIZE);
2614 if (ports == NULL) {
2615 rte_exit(EXIT_FAILURE,
2616 "rte_zmalloc(%d struct rte_port) failed\n",
/*
 * NOTE(review): interior of the periodic stats printer (presumably
 * print_stats() — its signature is outside this view; confirm in the full
 * file). Clears the terminal with ANSI escapes (ESC[2J, cursor to 1;1)
 * then displays NIC stats for every forwarding port.
 */
2632 const char clr[] = { 27, '[', '2', 'J', '\0' };
2633 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2635 /* Clear screen and move to top left */
2636 printf("%s%s", clr, top_left);
2638 printf("\nPort statistics ====================================");
2639 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2640 nic_stats_display(fwd_ports_ids[i]);
/*
 * SIGINT/SIGTERM handler: tear down optional pdump/latencystats
 * frameworks, flag forced termination, then re-raise the signal with the
 * default disposition so the process exits with the expected status.
 */
2644 signal_handler(int signum)
2646 if (signum == SIGINT || signum == SIGTERM) {
2647 printf("\nSignal %d received, preparing to exit...\n",
2649 #ifdef RTE_LIBRTE_PDUMP
2650 /* uninitialize packet capture framework */
2653 #ifdef RTE_LIBRTE_LATENCY_STATS
2654 rte_latencystats_uninit();
2657 /* Set flag to indicate the force termination. */
2659 /* exit with the expected status */
2660 signal(signum, SIG_DFL);
2661 kill(getpid(), signum);
2666 main(int argc, char** argv)
2672 signal(SIGINT, signal_handler);
2673 signal(SIGTERM, signal_handler);
2675 diag = rte_eal_init(argc, argv);
2677 rte_panic("Cannot init EAL\n");
2679 testpmd_logtype = rte_log_register("testpmd");
2680 if (testpmd_logtype < 0)
2681 rte_panic("Cannot register log type");
2682 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2684 #ifdef RTE_LIBRTE_PDUMP
2685 /* initialize packet capture framework */
2686 rte_pdump_init(NULL);
2689 nb_ports = (portid_t) rte_eth_dev_count_avail();
2691 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2693 /* allocate port structures, and init them */
2696 set_def_fwd_config();
2698 rte_panic("Empty set of forwarding logical cores - check the "
2699 "core mask supplied in the command parameters\n");
2701 /* Bitrate/latency stats disabled by default */
2702 #ifdef RTE_LIBRTE_BITRATE
2703 bitrate_enabled = 0;
2705 #ifdef RTE_LIBRTE_LATENCY_STATS
2706 latencystats_enabled = 0;
2709 /* on FreeBSD, mlockall() is disabled by default */
2710 #ifdef RTE_EXEC_ENV_BSDAPP
2719 launch_args_parse(argc, argv);
2721 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2722 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2726 if (tx_first && interactive)
2727 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2728 "interactive mode.\n");
2730 if (tx_first && lsc_interrupt) {
2731 printf("Warning: lsc_interrupt needs to be off when "
2732 " using tx_first. Disabling.\n");
2736 if (!nb_rxq && !nb_txq)
2737 printf("Warning: Either rx or tx queues should be non-zero\n");
2739 if (nb_rxq > 1 && nb_rxq > nb_txq)
2740 printf("Warning: nb_rxq=%d enables RSS configuration, "
2741 "but nb_txq=%d will prevent to fully test it.\n",
2747 /* enable hot plug monitoring */
2748 ret = rte_dev_event_monitor_start();
2753 eth_dev_event_callback_register();
2757 if (start_port(RTE_PORT_ALL) != 0)
2758 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2760 /* set all ports to promiscuous mode by default */
2761 RTE_ETH_FOREACH_DEV(port_id)
2762 rte_eth_promiscuous_enable(port_id);
2764 /* Init metrics library */
2765 rte_metrics_init(rte_socket_id());
2767 #ifdef RTE_LIBRTE_LATENCY_STATS
2768 if (latencystats_enabled != 0) {
2769 int ret = rte_latencystats_init(1, NULL);
2771 printf("Warning: latencystats init()"
2772 " returned error %d\n", ret);
2773 printf("Latencystats running on lcore %d\n",
2774 latencystats_lcore_id);
2778 /* Setup bitrate stats */
2779 #ifdef RTE_LIBRTE_BITRATE
2780 if (bitrate_enabled != 0) {
2781 bitrate_data = rte_stats_bitrate_create();
2782 if (bitrate_data == NULL)
2783 rte_exit(EXIT_FAILURE,
2784 "Could not allocate bitrate data.\n");
2785 rte_stats_bitrate_reg(bitrate_data);
2789 #ifdef RTE_LIBRTE_CMDLINE
2790 if (strlen(cmdline_filename) != 0)
2791 cmdline_read_from_file(cmdline_filename);
2793 if (interactive == 1) {
2795 printf("Start automatic packet forwarding\n");
2796 start_packet_forwarding(0);
2808 printf("No commandline core given, start packet forwarding\n");
2809 start_packet_forwarding(tx_first);
2810 if (stats_period != 0) {
2811 uint64_t prev_time = 0, cur_time, diff_time = 0;
2812 uint64_t timer_period;
2814 /* Convert to number of cycles */
2815 timer_period = stats_period * rte_get_timer_hz();
2817 while (f_quit == 0) {
2818 cur_time = rte_get_timer_cycles();
2819 diff_time += cur_time - prev_time;
2821 if (diff_time >= timer_period) {
2823 /* Reset the timer */
2826 /* Sleep to avoid unnecessary checks */
2827 prev_time = cur_time;
2832 printf("Press enter to exit\n");
2833 rc = read(0, &c, 1);