1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
66 uint16_t verbose_level = 0; /**< Silent by default. */
67 int testpmd_logtype; /**< Log type for testpmd logs */
69 /* use master core for command line ? */
70 uint8_t interactive = 0;
71 uint8_t auto_start = 0;
73 char cmdline_filename[PATH_MAX] = {0};
76 * NUMA support configuration.
77 * When set, the NUMA support attempts to dispatch the allocation of the
78 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
79 * probed ports among the CPU sockets 0 and 1.
80 * Otherwise, all memory is allocated from CPU socket 0.
82 uint8_t numa_support = 1; /**< numa enabled by default */
85 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
88 uint8_t socket_num = UMA_NO_CONFIG;
91 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
96 * Store specified sockets on which memory pool to be used by ports
99 uint8_t port_numa[RTE_MAX_ETHPORTS];
102 * Store specified sockets on which RX ring to be used by ports
105 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
108 * Store specified sockets on which TX ring to be used by ports
111 uint8_t txring_numa[RTE_MAX_ETHPORTS];
114 * Record the Ethernet address of peer target ports to which packets are
116 * Must be instantiated with the ethernet addresses of peer traffic generator
119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120 portid_t nb_peer_eth_addrs = 0;
123 * Probed Target Environment.
125 struct rte_port *ports; /**< For all probed ethernet ports. */
126 portid_t nb_ports; /**< Number of probed ethernet ports. */
127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
131 * Test Forwarding Configuration.
132 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
133 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
137 portid_t nb_cfg_ports; /**< Number of configured ports. */
138 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
143 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
144 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
147 * Forwarding engines.
149 struct fwd_engine * fwd_engines[] = {
158 #if defined RTE_LIBRTE_PMD_SOFTNIC
161 #ifdef RTE_LIBRTE_IEEE1588
162 &ieee1588_fwd_engine,
167 struct fwd_config cur_fwd_config;
168 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
169 uint32_t retry_enabled;
170 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
171 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
173 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
174 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
175 * specified on command-line. */
176 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
179 * In a container, it may be impossible to terminate a process that is running
180 * with the 'stats-period' option. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
185 * Configuration of packet segments used by the "txonly" processing engine.
187 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
188 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
189 TXONLY_DEF_PACKET_LEN,
191 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
193 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
194 /**< Split policy for packets to TX. */
196 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
197 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
199 /* current configuration is in DCB or not, 0 means it is not in DCB mode */
200 uint8_t dcb_config = 0;
202 /* Whether the dcb is in testing status */
203 uint8_t dcb_test = 0;
206 * Configurable number of RX/TX queues.
208 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
209 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
212 * Configurable number of RX/TX ring descriptors.
213 * Defaults are supplied by drivers via ethdev.
215 #define RTE_TEST_RX_DESC_DEFAULT 0
216 #define RTE_TEST_TX_DESC_DEFAULT 0
217 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
218 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
220 #define RTE_PMD_PARAM_UNSET -1
222 * Configurable values of RX and TX ring threshold registers.
225 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
234 * Configurable value of RX free threshold.
236 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
239 * Configurable value of RX drop enable.
241 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
244 * Configurable value of TX free threshold.
246 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
249 * Configurable value of TX RS bit threshold.
251 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
254 * Receive Side Scaling (RSS) configuration.
256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
259 * Port topology configuration
261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
264 * Avoid flushing all the RX streams before starting forwarding.
266 uint8_t no_flush_rx = 0; /* flush by default */
269 * Flow API isolated mode.
271 uint8_t flow_isolate_all;
274 * Avoid checking link status when starting/stopping a port.
276 uint8_t no_link_check = 0; /* check by default */
279 * Enable link status change notification
281 uint8_t lsc_interrupt = 1; /* enabled by default */
284 * Enable device removal notification.
286 uint8_t rmv_interrupt = 1; /* enabled by default */
288 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
291 * Display or mask ether events
292 * Default to all events except VF_MBOX
294 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
295 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
296 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
297 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
298 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
299 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
300 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
302 * Decide if all memory are locked for performance.
307 * NIC bypass mode configuration options.
310 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
311 /* The NIC bypass watchdog timeout. */
312 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
316 #ifdef RTE_LIBRTE_LATENCY_STATS
319 * Set when latency stats is enabled in the commandline
321 uint8_t latencystats_enabled;
324 * Lcore ID to serve latency statistics.
326 lcoreid_t latencystats_lcore_id = -1;
331 * Ethernet device configuration.
333 struct rte_eth_rxmode rx_mode = {
334 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
335 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
338 struct rte_eth_txmode tx_mode = {
339 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
342 struct rte_fdir_conf fdir_conf = {
343 .mode = RTE_FDIR_MODE_NONE,
344 .pballoc = RTE_FDIR_PBALLOC_64K,
345 .status = RTE_FDIR_REPORT_STATUS,
347 .vlan_tci_mask = 0xFFEF,
349 .src_ip = 0xFFFFFFFF,
350 .dst_ip = 0xFFFFFFFF,
353 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
354 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
356 .src_port_mask = 0xFFFF,
357 .dst_port_mask = 0xFFFF,
358 .mac_addr_byte_mask = 0xFF,
359 .tunnel_type_mask = 1,
360 .tunnel_id_mask = 0xFFFFFFFF,
365 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
367 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
368 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
370 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
371 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
373 uint16_t nb_tx_queue_stats_mappings = 0;
374 uint16_t nb_rx_queue_stats_mappings = 0;
377 * Display zero values by default for xstats
379 uint8_t xstats_hide_zero;
381 unsigned int num_sockets = 0;
382 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
384 #ifdef RTE_LIBRTE_BITRATE
385 /* Bitrate statistics */
386 struct rte_stats_bitrates *bitrate_data;
387 lcoreid_t bitrate_lcore_id;
388 uint8_t bitrate_enabled;
391 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
392 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
394 /* Forward function declarations */
395 static void map_port_queue_stats_mapping_registers(portid_t pi,
396 struct rte_port *port);
397 static void check_all_ports_link_status(uint32_t port_mask);
398 static int eth_event_callback(portid_t port_id,
399 enum rte_eth_event_type type,
400 void *param, void *ret_param);
401 static void eth_dev_event_callback(char *device_name,
402 enum rte_dev_event_type type,
404 static int eth_dev_event_callback_register(void);
405 static int eth_dev_event_callback_unregister(void);
409 * Check if all the ports are started.
410 * If yes, return positive value. If not, return zero.
412 static int all_ports_started(void);
414 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
415 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
418 * Helper function to check if socket is already discovered.
419 * If yes, return positive value. If not, return zero.
422 new_socket_id(unsigned int socket_id)
/* Linear scan of the already-discovered socket ids; the loop body below
 * returns 0 on a match (return statements elided in this listing). */
426 for (i = 0; i < num_sockets; i++) {
427 if (socket_ids[i] == socket_id)
434 * Setup default configuration.
437 set_default_fwd_lcores_config(void)
/* Build the default list of forwarding lcores: every enabled lcore except
 * the master (reserved for the command line), and record each lcore's
 * NUMA socket in socket_ids[].
 * NOTE(review): this listing elides lines; the body shown is partial. */
441 unsigned int sock_num;
444 for (i = 0; i < RTE_MAX_LCORE; i++) {
445 sock_num = rte_lcore_to_socket_id(i);
446 if (new_socket_id(sock_num)) {
/* First time this socket is seen: record it, guarding the
 * fixed-size socket_ids[] array. */
447 if (num_sockets >= RTE_MAX_NUMA_NODES) {
448 rte_exit(EXIT_FAILURE,
449 "Total sockets greater than %u\n",
452 socket_ids[num_sockets++] = sock_num;
/* Skip disabled lcores and the master lcore. */
454 if (!rte_lcore_is_enabled(i))
456 if (i == rte_get_master_lcore())
458 fwd_lcores_cpuids[nb_lc++] = i;
460 nb_lcores = (lcoreid_t) nb_lc;
461 nb_cfg_lcores = nb_lcores;
466 set_def_peer_eth_addrs(void)
/* Default peer MAC addresses: locally-administered first byte, with the
 * port index as the last byte (middle bytes left as initialized elsewhere). */
470 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
471 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
472 peer_eth_addrs[i].addr_bytes[5] = i;
477 set_default_fwd_ports_config(void)
/* By default, forward on every probed ethdev port, in probe order. */
482 RTE_ETH_FOREACH_DEV(pt_id)
483 fwd_ports_ids[i++] = pt_id;
485 nb_cfg_ports = nb_ports;
486 nb_fwd_ports = nb_ports;
490 set_def_fwd_config(void)
/* Build the complete default forwarding configuration:
 * lcores, peer MAC addresses, and forwarding ports. */
492 set_default_fwd_lcores_config();
493 set_def_peer_eth_addrs();
494 set_default_fwd_ports_config();
498 * Configuration initialisation done once at init time.
501 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
502 unsigned int socket_id)
/* Create the mbuf pool used by ports/lcores on the given NUMA socket.
 * NOTE(review): this listing elides lines; the anonymous-memory branch and
 * the rte_pktmbuf_pool_create() branch below are alternatives selected by
 * elided conditions. */
504 char pool_name[RTE_MEMPOOL_NAMESIZE];
505 struct rte_mempool *rte_mp = NULL;
508 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
/* Build a per-socket unique pool name. */
509 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
512 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
513 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
/* Anonymous-memory path: create an empty pool, then populate it. */
516 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
517 mb_size, (unsigned) mb_mempool_cache,
518 sizeof(struct rte_pktmbuf_pool_private),
/* Populate failure: release the half-built pool. */
523 if (rte_mempool_populate_anon(rte_mp) == 0) {
524 rte_mempool_free(rte_mp);
528 rte_pktmbuf_pool_init(rte_mp, NULL);
529 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
531 /* wrapper to rte_mempool_create() */
532 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
533 rte_mbuf_best_mempool_ops());
534 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
535 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
/* Pool creation failure is fatal. */
539 if (rte_mp == NULL) {
540 rte_exit(EXIT_FAILURE,
541 "Creation of mbuf pool for socket %u failed: %s\n",
542 socket_id, rte_strerror(rte_errno));
543 } else if (verbose_level > 0) {
544 rte_mempool_dump(stdout, rte_mp);
549 * Check given socket id is valid or not with NUMA mode,
550 * if valid, return 0, else return -1
553 check_socket_id(const unsigned int socket_id)
/* Warn at most once (static flag) when NUMA is enabled and the socket id
 * has not been discovered yet. */
555 static int warning_once = 0;
557 if (new_socket_id(socket_id)) {
558 if (!warning_once && numa_support)
559 printf("Warning: NUMA should be configured manually by"
560 " using --port-numa-config and"
561 " --ring-numa-config parameters along with"
570 * Get the allowed maximum number of RX queues.
571 * *pid returns the port id which has the minimal value of
572 * max_rx_queues in all ports.
575 get_allowed_max_nb_rxq(portid_t *pid)
/* Minimum of max_rx_queues over all ports. NOTE(review): the assignment
 * of the limiting port to *pid is elided in this listing. */
577 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
579 struct rte_eth_dev_info dev_info;
581 RTE_ETH_FOREACH_DEV(pi) {
582 rte_eth_dev_info_get(pi, &dev_info);
583 if (dev_info.max_rx_queues < allowed_max_rxq) {
584 allowed_max_rxq = dev_info.max_rx_queues;
588 return allowed_max_rxq;
592 * Check input rxq is valid or not.
593 * If input rxq is not greater than any of maximum number
594 * of RX queues of all ports, it is valid.
595 * if valid, return 0, else return -1
598 check_nb_rxq(queueid_t rxq)
/* Reject an rxq count larger than the smallest per-port max_rx_queues. */
600 queueid_t allowed_max_rxq;
603 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
604 if (rxq > allowed_max_rxq) {
605 printf("Fail: input rxq (%u) can't be greater "
606 "than max_rx_queues (%u) of port %u\n",
616 * Get the allowed maximum number of TX queues.
617 * *pid returns the port id which has the minimal value of
618 * max_tx_queues in all ports.
621 get_allowed_max_nb_txq(portid_t *pid)
/* Minimum of max_tx_queues over all ports. NOTE(review): the assignment
 * of the limiting port to *pid is elided in this listing. */
623 queueid_t allowed_max_txq = MAX_QUEUE_ID;
625 struct rte_eth_dev_info dev_info;
627 RTE_ETH_FOREACH_DEV(pi) {
628 rte_eth_dev_info_get(pi, &dev_info);
629 if (dev_info.max_tx_queues < allowed_max_txq) {
630 allowed_max_txq = dev_info.max_tx_queues;
634 return allowed_max_txq;
638 * Check input txq is valid or not.
639 * If input txq is not greater than any of maximum number
640 * of TX queues of all ports, it is valid.
641 * if valid, return 0, else return -1
644 check_nb_txq(queueid_t txq)
/* Reject a txq count larger than the smallest per-port max_tx_queues. */
646 queueid_t allowed_max_txq;
649 allowed_max_txq = get_allowed_max_nb_txq(&pid);
650 if (txq > allowed_max_txq) {
651 printf("Fail: input txq (%u) can't be greater "
652 "than max_tx_queues (%u) of port %u\n",
/* NOTE(review): this is the body of init_config(); the function header and
 * many lines are elided in this listing. It performs one-time setup:
 * lcore contexts, per-port default config, mbuf pools, forwarding streams,
 * GSO context, and GRO contexts. */
665 struct rte_port *port;
666 struct rte_mempool *mbp;
667 unsigned int nb_mbuf_per_pool;
669 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
670 struct rte_gro_param gro_param;
674 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
/* Reset per-port NUMA placement tables to "not configured". */
677 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
678 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
679 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
682 /* Configuration of logical cores. */
683 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
684 sizeof(struct fwd_lcore *) * nb_lcores,
685 RTE_CACHE_LINE_SIZE);
686 if (fwd_lcores == NULL) {
687 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
688 "failed\n", nb_lcores);
690 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
691 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
692 sizeof(struct fwd_lcore),
693 RTE_CACHE_LINE_SIZE);
694 if (fwd_lcores[lc_id] == NULL) {
695 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
698 fwd_lcores[lc_id]->cpuid_idx = lc_id;
701 RTE_ETH_FOREACH_DEV(pid) {
703 /* Apply default TxRx configuration for all ports */
704 port->dev_conf.txmode = tx_mode;
705 port->dev_conf.rxmode = rx_mode;
706 rte_eth_dev_info_get(pid, &port->dev_info);
/* Drop default offloads the device does not advertise. */
708 if (!(port->dev_info.rx_offload_capa &
709 DEV_RX_OFFLOAD_CRC_STRIP))
710 port->dev_conf.rxmode.offloads &=
711 ~DEV_RX_OFFLOAD_CRC_STRIP;
712 if (!(port->dev_info.tx_offload_capa &
713 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
714 port->dev_conf.txmode.offloads &=
715 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
/* Count ports per NUMA socket (explicit config wins over the
 * device's own socket). */
717 if (port_numa[pid] != NUMA_NO_CONFIG)
718 port_per_socket[port_numa[pid]]++;
720 uint32_t socket_id = rte_eth_dev_socket_id(pid);
722 /* if socket_id is invalid, set to 0 */
723 if (check_socket_id(socket_id) < 0)
725 port_per_socket[socket_id]++;
729 /* Apply Rx offloads configuration */
730 for (k = 0; k < port->dev_info.max_rx_queues; k++)
731 port->rx_conf[k].offloads =
732 port->dev_conf.rxmode.offloads;
733 /* Apply Tx offloads configuration */
734 for (k = 0; k < port->dev_info.max_tx_queues; k++)
735 port->tx_conf[k].offloads =
736 port->dev_conf.txmode.offloads;
738 /* set flag to initialize port/queue */
739 port->need_reconfig = 1;
740 port->need_reconfig_queues = 1;
744 * Create pools of mbuf.
745 * If NUMA support is disabled, create a single pool of mbuf in
746 * socket 0 memory by default.
747 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
749 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
750 * nb_txd can be configured at run time.
752 if (param_total_num_mbufs)
753 nb_mbuf_per_pool = param_total_num_mbufs;
755 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
756 (nb_lcores * mb_mempool_cache) +
757 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
758 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
/* NUMA: one pool per discovered socket; UMA: a single pool. */
764 for (i = 0; i < num_sockets; i++)
765 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
768 if (socket_num == UMA_NO_CONFIG)
769 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
771 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
777 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
778 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
780 * Records which Mbuf pool to use by each logical core, if needed.
782 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
783 mbp = mbuf_pool_find(
784 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
/* Fall back to the socket-0 pool when no local pool exists
 * (the NULL check is elided in this listing). */
787 mbp = mbuf_pool_find(0);
788 fwd_lcores[lc_id]->mbp = mbp;
789 /* initialize GSO context */
790 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
791 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
792 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
793 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
795 fwd_lcores[lc_id]->gso_ctx.flag = 0;
798 /* Configuration of packet forwarding streams. */
799 if (init_fwd_streams() < 0)
800 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
804 /* create a gro context for each lcore */
805 gro_param.gro_types = RTE_GRO_TCP_IPV4;
806 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
807 gro_param.max_item_per_flow = MAX_PKT_BURST;
808 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
809 gro_param.socket_id = rte_lcore_to_socket_id(
810 fwd_lcores_cpuids[lc_id]);
811 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
812 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
813 rte_exit(EXIT_FAILURE,
814 "rte_gro_ctx_create() failed\n");
818 #if defined RTE_LIBRTE_PMD_SOFTNIC
/* Hand the lcore array to softnic ports when running in softnic mode. */
819 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
820 RTE_ETH_FOREACH_DEV(pid) {
822 const char *driver = port->dev_info.driver_name;
824 if (strcmp(driver, "net_softnic") == 0)
825 port->softport.fwd_lcore_arg = fwd_lcores;
834 reconfig(portid_t new_port_id, unsigned socket_id)
/* Refresh device info for a (re)attached port and mark it so the next
 * start reconfigures both the port and its queues. */
836 struct rte_port *port;
838 /* Reconfiguration of Ethernet ports. */
839 port = &ports[new_port_id];
840 rte_eth_dev_info_get(new_port_id, &port->dev_info);
842 /* set flag to initialize port/queue */
843 port->need_reconfig = 1;
844 port->need_reconfig_queues = 1;
845 port->socket_id = socket_id;
852 init_fwd_streams(void)
/* Validate nb_rxq/nb_txq against each port's limits, choose each port's
 * socket id, then (re)allocate the fwd_streams[] array sized to
 * nb_ports * max(nb_rxq, nb_txq).
 * NOTE(review): this listing elides lines; the body shown is partial. */
855 struct rte_port *port;
856 streamid_t sm_id, nb_fwd_streams_new;
859 /* set socket id according to numa or not */
860 RTE_ETH_FOREACH_DEV(pid) {
862 if (nb_rxq > port->dev_info.max_rx_queues) {
863 printf("Fail: nb_rxq(%d) is greater than "
864 "max_rx_queues(%d)\n", nb_rxq,
865 port->dev_info.max_rx_queues);
868 if (nb_txq > port->dev_info.max_tx_queues) {
869 printf("Fail: nb_txq(%d) is greater than "
870 "max_tx_queues(%d)\n", nb_txq,
871 port->dev_info.max_tx_queues);
/* Explicit per-port NUMA config wins; otherwise use the
 * device's own socket. */
875 if (port_numa[pid] != NUMA_NO_CONFIG)
876 port->socket_id = port_numa[pid];
878 port->socket_id = rte_eth_dev_socket_id(pid);
880 /* if socket_id is invalid, set to 0 */
881 if (check_socket_id(port->socket_id) < 0)
886 if (socket_num == UMA_NO_CONFIG)
889 port->socket_id = socket_num;
893 q = RTE_MAX(nb_rxq, nb_txq);
895 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
898 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
/* Nothing to do when the stream count is unchanged. */
899 if (nb_fwd_streams_new == nb_fwd_streams)
/* Free the old stream array before reallocating. */
902 if (fwd_streams != NULL) {
903 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
904 if (fwd_streams[sm_id] == NULL)
906 rte_free(fwd_streams[sm_id]);
907 fwd_streams[sm_id] = NULL;
909 rte_free(fwd_streams);
914 nb_fwd_streams = nb_fwd_streams_new;
915 if (nb_fwd_streams) {
916 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
917 sizeof(struct fwd_stream *) * nb_fwd_streams,
918 RTE_CACHE_LINE_SIZE);
919 if (fwd_streams == NULL)
920 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
921 " (struct fwd_stream *)) failed\n",
924 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
925 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
926 " struct fwd_stream", sizeof(struct fwd_stream),
927 RTE_CACHE_LINE_SIZE);
928 if (fwd_streams[sm_id] == NULL)
929 rte_exit(EXIT_FAILURE, "rte_zmalloc"
930 "(struct fwd_stream) failed\n");
937 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
939 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
/* Print a histogram summary of burst sizes: total bursts plus the top two
 * most frequent burst lengths, as percentages. */
941 unsigned int total_burst;
942 unsigned int nb_burst;
943 unsigned int burst_stats[3];
944 uint16_t pktnb_stats[3];
946 int burst_percent[3];
949 * First compute the total number of packet bursts and the
950 * two highest numbers of bursts of the same number of packets.
953 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
954 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
955 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
956 nb_burst = pbs->pkt_burst_spread[nb_pkt];
959 total_burst += nb_burst;
/* Keep burst_stats[0] >= burst_stats[1] (top-2 tracking). */
960 if (nb_burst > burst_stats[0]) {
961 burst_stats[1] = burst_stats[0];
962 pktnb_stats[1] = pktnb_stats[0];
963 burst_stats[0] = nb_burst;
964 pktnb_stats[0] = nb_pkt;
965 } else if (nb_burst > burst_stats[1]) {
966 burst_stats[1] = nb_burst;
967 pktnb_stats[1] = nb_pkt;
/* Nothing recorded: skip the display entirely. */
970 if (total_burst == 0)
972 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
973 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
974 burst_percent[0], (int) pktnb_stats[0]);
975 if (burst_stats[0] == total_burst) {
979 if (burst_stats[0] + burst_stats[1] == total_burst) {
980 printf(" + %d%% of %d pkts]\n",
981 100 - burst_percent[0], pktnb_stats[1]);
984 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
985 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
986 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
987 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
990 printf(" + %d%% of %d pkts + %d%% of others]\n",
991 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
993 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
996 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
/* Print per-port forwarding statistics; layout differs depending on
 * whether queue-stats mapping is enabled for the port. */
998 struct rte_port *port;
1001 static const char *fwd_stats_border = "----------------------";
1003 port = &ports[port_id];
1004 printf("\n %s Forward statistics for port %-2d %s\n",
1005 fwd_stats_border, port_id, fwd_stats_border);
/* No queue-stats mapping: wide-format aggregate counters. */
1007 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
1008 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1010 stats->ipackets, stats->imissed,
1011 (uint64_t) (stats->ipackets + stats->imissed));
/* Checksum engine reports bad-checksum counters as well. */
1013 if (cur_fwd_eng == &csum_fwd_engine)
1014 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
1015 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1016 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1017 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
1018 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
1021 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1023 stats->opackets, port->tx_dropped,
1024 (uint64_t) (stats->opackets + port->tx_dropped));
/* Queue-stats mapping enabled: compact-format counters. */
1027 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
1029 stats->ipackets, stats->imissed,
1030 (uint64_t) (stats->ipackets + stats->imissed));
1032 if (cur_fwd_eng == &csum_fwd_engine)
1033 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
1034 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1035 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1036 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
1037 printf(" RX-nombufs: %14"PRIu64"\n",
1041 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
1043 stats->opackets, port->tx_dropped,
1044 (uint64_t) (stats->opackets + port->tx_dropped));
1047 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1048 if (port->rx_stream)
1049 pkt_burst_stats_display("RX",
1050 &port->rx_stream->rx_burst_stats);
1051 if (port->tx_stream)
1052 pkt_burst_stats_display("TX",
1053 &port->tx_stream->tx_burst_stats);
/* Per-queue counters from the stats-mapping registers. */
1056 if (port->rx_queue_stats_mapping_enabled) {
1058 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1059 printf(" Stats reg %2d RX-packets:%14"PRIu64
1060 " RX-errors:%14"PRIu64
1061 " RX-bytes:%14"PRIu64"\n",
1062 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1066 if (port->tx_queue_stats_mapping_enabled) {
1067 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1068 printf(" Stats reg %2d TX-packets:%14"PRIu64
1069 " TX-bytes:%14"PRIu64"\n",
1070 i, stats->q_opackets[i], stats->q_obytes[i]);
1074 printf(" %s--------------------------------%s\n",
1075 fwd_stats_border, fwd_stats_border);
1079 fwd_stream_stats_display(streamid_t stream_id)
/* Print per-stream forwarding statistics; streams with no activity
 * (no RX, no TX, no drops) are skipped. */
1081 struct fwd_stream *fs;
1082 static const char *fwd_top_stats_border = "-------";
1084 fs = fwd_streams[stream_id];
1085 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1086 (fs->fwd_dropped == 0))
1088 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1089 "TX Port=%2d/Queue=%2d %s\n",
1090 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1091 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1092 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1093 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1095 /* if checksum mode */
1096 if (cur_fwd_eng == &csum_fwd_engine) {
1097 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
1098 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1101 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1102 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1103 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1108 flush_fwd_rx_queues(void)
/* Drain every forwarding RX queue (two passes over all ports/queues),
 * freeing every received mbuf, before forwarding starts. */
1110 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1117 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1118 uint64_t timer_period;
1120 /* convert to number of cycles */
1121 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1123 for (j = 0; j < 2; j++) {
1124 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1125 for (rxq = 0; rxq < nb_rxq; rxq++) {
1126 port_id = fwd_ports_ids[rxp];
1128 * testpmd can get stuck in the do-while loop below
1129 * if rte_eth_rx_burst() always returns nonzero
1130 * packets. So a timer is added to exit this loop
1131 * after the 1 sec timer expires.
1133 prev_tsc = rte_rdtsc();
1135 nb_rx = rte_eth_rx_burst(port_id, rxq,
1136 pkts_burst, MAX_PKT_BURST);
1137 for (i = 0; i < nb_rx; i++)
1138 rte_pktmbuf_free(pkts_burst[i]);
1140 cur_tsc = rte_rdtsc();
1141 diff_tsc = cur_tsc - prev_tsc;
1142 timer_tsc += diff_tsc;
/* Stop when the queue is empty or the 1 s timeout elapses. */
1143 } while ((nb_rx > 0) &&
1144 (timer_tsc < timer_period));
1148 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1153 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
/* Main forwarding loop of one lcore: repeatedly run pkt_fwd on every
 * stream assigned to this lcore until fc->stopped is set; optionally
 * performs periodic bitrate and latency-stats updates.
 * NOTE(review): the opening "do {" of the loop is elided in this listing. */
1155 struct fwd_stream **fsm;
1158 #ifdef RTE_LIBRTE_BITRATE
1159 uint64_t tics_per_1sec;
1160 uint64_t tics_datum;
1161 uint64_t tics_current;
1164 tics_datum = rte_rdtsc();
1165 tics_per_1sec = rte_get_timer_hz();
1167 fsm = &fwd_streams[fc->stream_idx];
1168 nb_fs = fc->stream_nb;
1170 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1171 (*pkt_fwd)(fsm[sm_id]);
1172 #ifdef RTE_LIBRTE_BITRATE
/* Only the designated bitrate lcore recomputes bitrates, once per second. */
1173 if (bitrate_enabled != 0 &&
1174 bitrate_lcore_id == rte_lcore_id()) {
1175 tics_current = rte_rdtsc();
1176 if (tics_current - tics_datum >= tics_per_1sec) {
1177 /* Periodic bitrate calculation */
1178 RTE_ETH_FOREACH_DEV(idx_port)
1179 rte_stats_bitrate_calc(bitrate_data,
1181 tics_datum = tics_current;
1185 #ifdef RTE_LIBRTE_LATENCY_STATS
/* Only the designated latency-stats lcore updates latency statistics. */
1186 if (latencystats_enabled != 0 &&
1187 latencystats_lcore_id == rte_lcore_id())
1188 rte_latencystats_update();
1191 } while (! fc->stopped);
1195 start_pkt_forward_on_core(void *fwd_arg)
/* Lcore entry point: run the current engine's forwarding function
 * on this lcore's streams. */
1197 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1198 cur_fwd_config.fwd_eng->packet_fwd);
1203 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1204 * Used to start communication flows in network loopback test configurations.
1207 run_one_txonly_burst_on_core(void *fwd_arg)
/* Run the txonly engine exactly one iteration: copy the lcore context and
 * pre-set stopped=1 so the forwarding loop exits after a single pass. */
1209 struct fwd_lcore *fwd_lc;
1210 struct fwd_lcore tmp_lcore;
1212 fwd_lc = (struct fwd_lcore *) fwd_arg;
1213 tmp_lcore = *fwd_lc;
1214 tmp_lcore.stopped = 1;
1215 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1220 * Launch packet forwarding:
1221 * - Setup per-port forwarding context.
1222 * - launch logical cores with their forwarding configuration.
1225 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
/* Run the engine's per-port begin callback, then launch the forwarding
 * function on every configured forwarding lcore (the current lcore is
 * skipped in interactive mode so the CLI stays responsive). */
1227 port_fwd_begin_t port_fwd_begin;
1232 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1233 if (port_fwd_begin != NULL) {
1234 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1235 (*port_fwd_begin)(fwd_ports_ids[i]);
1237 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1238 lc_id = fwd_lcores_cpuids[i];
1239 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1240 fwd_lcores[i]->stopped = 0;
1241 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1242 fwd_lcores[i], lc_id);
1244 printf("launch lcore %u failed - diag=%d\n",
1251 * Update the forward ports list.
1254 update_fwd_ports(portid_t new_pid)
/* Compact fwd_ports_ids[] by dropping ports that are no longer valid,
 * optionally appending new_pid, and refresh the port counts. */
1257 unsigned int new_nb_fwd_ports = 0;
1260 for (i = 0; i < nb_fwd_ports; ++i) {
1261 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1264 fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
/* RTE_MAX_ETHPORTS is used as the "no new port" sentinel. */
1268 if (new_pid < RTE_MAX_ETHPORTS)
1269 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1271 nb_fwd_ports = new_nb_fwd_ports;
1272 nb_cfg_ports = new_nb_fwd_ports;
1276 * Launch packet forwarding configuration.
1279 start_packet_forwarding(int with_tx_first)
/* Validate the configuration (queue counts per engine, all ports started,
 * DCB constraints), reset all port and stream counters, optionally run
 * one or more txonly warm-up bursts, then launch forwarding on all lcores.
 * NOTE(review): this listing elides lines; the body shown is partial. */
1281 port_fwd_begin_t port_fwd_begin;
1282 port_fwd_end_t port_fwd_end;
1283 struct rte_port *port;
/* Engine-specific queue-count sanity checks. */
1288 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1289 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1291 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1292 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1294 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1295 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1296 (!nb_rxq || !nb_txq))
1297 rte_exit(EXIT_FAILURE,
1298 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1299 cur_fwd_eng->fwd_mode_name);
1301 if (all_ports_started() == 0) {
1302 printf("Not all ports were started\n");
1305 if (test_done == 0) {
1306 printf("Packet forwarding already started\n");
/* DCB mode requires every forwarding port in DCB and >1 lcore. */
1312 for (i = 0; i < nb_fwd_ports; i++) {
1313 pt_id = fwd_ports_ids[i];
1314 port = &ports[pt_id];
1315 if (!port->dcb_flag) {
1316 printf("In DCB mode, all forwarding ports must "
1317 "be configured in this mode.\n");
1321 if (nb_fwd_lcores == 1) {
1322 printf("In DCB mode,the nb forwarding cores "
1323 "should be larger than 1.\n");
1332 flush_fwd_rx_queues();
1334 pkt_fwd_config_display(&cur_fwd_config);
1335 rxtx_config_display();
/* Snapshot current port stats and reset per-port drop counters. */
1337 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1338 pt_id = fwd_ports_ids[i];
1339 port = &ports[pt_id];
1340 rte_eth_stats_get(pt_id, &port->stats);
1341 port->tx_dropped = 0;
1343 map_port_queue_stats_mapping_registers(pt_id, port);
/* Reset all per-stream counters. */
1345 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1346 fwd_streams[sm_id]->rx_packets = 0;
1347 fwd_streams[sm_id]->tx_packets = 0;
1348 fwd_streams[sm_id]->fwd_dropped = 0;
1349 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1350 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1352 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1353 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1354 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1355 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1356 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1358 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1359 fwd_streams[sm_id]->core_cycles = 0;
/* Optional txonly warm-up bursts before the real engine starts. */
1362 if (with_tx_first) {
1363 port_fwd_begin = tx_only_engine.port_fwd_begin;
1364 if (port_fwd_begin != NULL) {
1365 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1366 (*port_fwd_begin)(fwd_ports_ids[i]);
1368 while (with_tx_first--) {
1369 launch_packet_forwarding(
1370 run_one_txonly_burst_on_core);
1371 rte_eal_mp_wait_lcore();
1373 port_fwd_end = tx_only_engine.port_fwd_end;
1374 if (port_fwd_end != NULL) {
1375 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1376 (*port_fwd_end)(fwd_ports_ids[i]);
1379 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * stop_packet_forwarding() - signal all forwarding lcores to stop, wait
 * for them, then aggregate and display per-stream, per-port and global
 * statistics for the finished run.
 *
 * NOTE(review): several lines (locals such as i/sm_id/pt_id/lc_id,
 * closing braces, some assignments) are not visible in this excerpt —
 * confirm against the full file before relying on exact control flow.
 */
1383 stop_packet_forwarding(void)
1385 struct rte_eth_stats stats;
1386 struct rte_port *port;
1387 port_fwd_end_t port_fwd_end;
1392 uint64_t total_recv;
1393 uint64_t total_xmit;
1394 uint64_t total_rx_dropped;
1395 uint64_t total_tx_dropped;
1396 uint64_t total_rx_nombuf;
1397 uint64_t tx_dropped;
1398 uint64_t rx_bad_ip_csum;
1399 uint64_t rx_bad_l4_csum;
1400 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1401 uint64_t fwd_cycles;
1404 static const char *acc_stats_border = "+++++++++++++++";
1407 printf("Packet forwarding not started\n");
/* Ask every forwarding lcore to leave its polling loop... */
1410 printf("Telling cores to stop...");
1411 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1412 fwd_lcores[lc_id]->stopped = 1;
/* ...and block until all of them have returned. */
1413 printf("\nWaiting for lcores to finish...\n");
1414 rte_eal_mp_wait_lcore();
/* Run the engine's per-port teardown hook, if any. */
1415 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1416 if (port_fwd_end != NULL) {
1417 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1418 pt_id = fwd_ports_ids[i];
1419 (*port_fwd_end)(pt_id);
1422 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Fold each stream's counters into its RX/TX ports' accumulators. */
1425 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1426 if (cur_fwd_config.nb_fwd_streams >
1427 cur_fwd_config.nb_fwd_ports) {
1428 fwd_stream_stats_display(sm_id);
1429 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1430 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1432 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1434 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1437 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1438 tx_dropped = (uint64_t) (tx_dropped +
1439 fwd_streams[sm_id]->fwd_dropped);
1440 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1443 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1444 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1445 fwd_streams[sm_id]->rx_bad_ip_csum);
1446 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1450 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1451 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1452 fwd_streams[sm_id]->rx_bad_l4_csum);
1453 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1456 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1457 fwd_cycles = (uint64_t) (fwd_cycles +
1458 fwd_streams[sm_id]->core_cycles);
/* Compute run deltas: subtract the baseline captured at start. */
1463 total_rx_dropped = 0;
1464 total_tx_dropped = 0;
1465 total_rx_nombuf = 0;
1466 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1467 pt_id = fwd_ports_ids[i];
1469 port = &ports[pt_id];
1470 rte_eth_stats_get(pt_id, &stats);
1471 stats.ipackets -= port->stats.ipackets;
1472 port->stats.ipackets = 0;
1473 stats.opackets -= port->stats.opackets;
1474 port->stats.opackets = 0;
1475 stats.ibytes -= port->stats.ibytes;
1476 port->stats.ibytes = 0;
1477 stats.obytes -= port->stats.obytes;
1478 port->stats.obytes = 0;
1479 stats.imissed -= port->stats.imissed;
1480 port->stats.imissed = 0;
1481 stats.oerrors -= port->stats.oerrors;
1482 port->stats.oerrors = 0;
1483 stats.rx_nombuf -= port->stats.rx_nombuf;
1484 port->stats.rx_nombuf = 0;
1486 total_recv += stats.ipackets;
1487 total_xmit += stats.opackets;
1488 total_rx_dropped += stats.imissed;
1489 total_tx_dropped += port->tx_dropped;
1490 total_rx_nombuf += stats.rx_nombuf;
1492 fwd_port_stats_display(pt_id, &stats);
/* Print the accumulated totals for all forwarding ports. */
1495 printf("\n %s Accumulated forward statistics for all ports"
1497 acc_stats_border, acc_stats_border);
1498 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1500 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1502 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1503 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1504 if (total_rx_nombuf > 0)
1505 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1506 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1508 acc_stats_border, acc_stats_border);
1509 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1511 printf("\n CPU cycles/packet=%u (total cycles="
1512 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1513 (unsigned int)(fwd_cycles / total_recv),
1514 fwd_cycles, total_recv);
1516 printf("\nDone.\n");
1521 dev_set_link_up(portid_t pid)
1523 if (rte_eth_dev_set_link_up(pid) < 0)
1524 printf("\nSet link up fail.\n");
1528 dev_set_link_down(portid_t pid)
1530 if (rte_eth_dev_set_link_down(pid) < 0)
1531 printf("\nSet link down fail.\n");
/*
 * all_ports_started() - return non-zero only when every non-slave port is
 * in RTE_PORT_STARTED state.  Bonding slave ports are skipped: they are
 * started by their bonded master device.
 * NOTE(review): the per-port assignment and the return statements of this
 * function are not visible in this excerpt.
 */
1535 all_ports_started(void)
1538 struct rte_port *port;
1540 RTE_ETH_FOREACH_DEV(pi) {
1542 /* Check if there is a port which is not started */
1543 if ((port->port_status != RTE_PORT_STARTED) &&
1544 (port->slave_flag == 0))
1548 /* No port is not started */
1553 port_is_stopped(portid_t port_id)
1555 struct rte_port *port = &ports[port_id];
1557 if ((port->port_status != RTE_PORT_STOPPED) &&
1558 (port->slave_flag == 0))
1564 all_ports_stopped(void)
1568 RTE_ETH_FOREACH_DEV(pi) {
1569 if (!port_is_stopped(pi))
1577 port_is_started(portid_t port_id)
1579 if (port_id_is_invalid(port_id, ENABLED_WARN))
1582 if (ports[port_id].port_status != RTE_PORT_STARTED)
1589 port_is_closed(portid_t port_id)
1591 if (port_id_is_invalid(port_id, ENABLED_WARN))
1594 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * start_port() - (re)configure and start one port, or all ports when
 * @pid == RTE_PORT_ALL.  Port state transitions are guarded with atomic
 * compare-and-set on port_status so concurrent management commands cannot
 * race.  On any setup failure the need_reconfig* flags are re-armed so the
 * next start attempt reconfigures again.
 *
 * NOTE(review): several lines (locals i/pi/qi, continue/return statements,
 * closing braces) are not visible in this excerpt — confirm control flow
 * against the full file.
 */
1601 start_port(portid_t pid)
1603 int diag, need_check_link_status = -1;
1606 struct rte_port *port;
1607 struct ether_addr mac_addr;
1608 enum rte_eth_event_type event_type;
1610 if (port_id_is_invalid(pid, ENABLED_WARN))
1615 RTE_ETH_FOREACH_DEV(pi) {
/* Skip ports not selected by @pid (unless starting all ports). */
1616 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1619 need_check_link_status = 0;
/* Claim the port: STOPPED -> HANDLING, or bail if someone else owns it. */
1621 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1622 RTE_PORT_HANDLING) == 0) {
1623 printf("Port %d is now not stopped\n", pi);
/* Device-level reconfiguration requested (e.g. after attach/reset). */
1627 if (port->need_reconfig > 0) {
1628 port->need_reconfig = 0;
1630 if (flow_isolate_all) {
1631 int ret = port_flow_isolate(pi, 1);
1633 printf("Failed to apply isolated"
1634 " mode on port %d\n", pi);
1639 printf("Configuring Port %d (socket %u)\n", pi,
1641 /* configure port */
1642 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1645 if (rte_atomic16_cmpset(&(port->port_status),
1646 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1647 printf("Port %d can not be set back "
1648 "to stopped\n", pi);
1649 printf("Fail to configure port %d\n", pi);
1650 /* try to reconfigure port next time */
1651 port->need_reconfig = 1;
/* Queue-level reconfiguration requested. */
1655 if (port->need_reconfig_queues > 0) {
1656 port->need_reconfig_queues = 0;
1657 /* setup tx queues */
1658 for (qi = 0; qi < nb_txq; qi++) {
/* Honor a per-port --txring-numa override when NUMA is enabled. */
1659 if ((numa_support) &&
1660 (txring_numa[pi] != NUMA_NO_CONFIG))
1661 diag = rte_eth_tx_queue_setup(pi, qi,
1662 port->nb_tx_desc[qi],
1664 &(port->tx_conf[qi]));
1666 diag = rte_eth_tx_queue_setup(pi, qi,
1667 port->nb_tx_desc[qi],
1669 &(port->tx_conf[qi]));
1674 /* Fail to setup tx queue, return */
1675 if (rte_atomic16_cmpset(&(port->port_status),
1677 RTE_PORT_STOPPED) == 0)
1678 printf("Port %d can not be set back "
1679 "to stopped\n", pi);
1680 printf("Fail to configure port %d tx queues\n",
1682 /* try to reconfigure queues next time */
1683 port->need_reconfig_queues = 1;
1686 for (qi = 0; qi < nb_rxq; qi++) {
1687 /* setup rx queues */
/* RX queues also need an mbuf pool on the right NUMA socket. */
1688 if ((numa_support) &&
1689 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1690 struct rte_mempool * mp =
1691 mbuf_pool_find(rxring_numa[pi]);
1693 printf("Failed to setup RX queue:"
1694 "No mempool allocation"
1695 " on the socket %d\n",
1700 diag = rte_eth_rx_queue_setup(pi, qi,
1701 port->nb_rx_desc[qi],
1703 &(port->rx_conf[qi]),
1706 struct rte_mempool *mp =
1707 mbuf_pool_find(port->socket_id);
1709 printf("Failed to setup RX queue:"
1710 "No mempool allocation"
1711 " on the socket %d\n",
1715 diag = rte_eth_rx_queue_setup(pi, qi,
1716 port->nb_rx_desc[qi],
1718 &(port->rx_conf[qi]),
1724 /* Fail to setup rx queue, return */
1725 if (rte_atomic16_cmpset(&(port->port_status),
1727 RTE_PORT_STOPPED) == 0)
1728 printf("Port %d can not be set back "
1729 "to stopped\n", pi);
1730 printf("Fail to configure port %d rx queues\n",
1732 /* try to reconfigure queues next time */
1733 port->need_reconfig_queues = 1;
/* Start the device itself; roll back to STOPPED on failure. */
1739 if (rte_eth_dev_start(pi) < 0) {
1740 printf("Fail to start port %d\n", pi);
1742 /* Fail to setup rx queue, return */
1743 if (rte_atomic16_cmpset(&(port->port_status),
1744 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1745 printf("Port %d can not be set back to "
1750 if (rte_atomic16_cmpset(&(port->port_status),
1751 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1752 printf("Port %d can not be set into started\n", pi);
/* Show the port's MAC so the user can wire up peers/generators. */
1754 rte_eth_macaddr_get(pi, &mac_addr);
1755 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1756 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1757 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1758 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1760 /* at least one port started, need checking link status */
1761 need_check_link_status = 1;
/* Register eth_event_callback() for every ethdev event type. */
1764 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1765 event_type < RTE_ETH_EVENT_MAX;
1767 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1772 printf("Failed to setup even callback for event %d\n",
1778 if (need_check_link_status == 1 && !no_link_check)
1779 check_all_ports_link_status(RTE_PORT_ALL);
1780 else if (need_check_link_status == 0)
1781 printf("Please stop the ports first\n");
/*
 * stop_port() - stop one port, or all ports when @pid == RTE_PORT_ALL.
 * Refuses to stop ports that are still part of the forwarding
 * configuration while forwarding runs, or that are bonding slaves.
 * Port state moves STARTED -> HANDLING -> STOPPED under atomic cmpset.
 * NOTE(review): locals (pi), continue statements and closing braces are
 * not visible in this excerpt.
 */
1788 stop_port(portid_t pid)
1791 struct rte_port *port;
1792 int need_check_link_status = 0;
1799 if (port_id_is_invalid(pid, ENABLED_WARN))
1802 printf("Stopping ports...\n");
1804 RTE_ETH_FOREACH_DEV(pi) {
1805 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
/* Cannot stop a port that active forwarding still uses. */
1808 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1809 printf("Please remove port %d from forwarding configuration.\n", pi);
1813 if (port_is_bonding_slave(pi)) {
1814 printf("Please remove port %d from bonded device.\n", pi);
/* Claim the port: STARTED -> HANDLING; skip if not started. */
1819 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1820 RTE_PORT_HANDLING) == 0)
1823 rte_eth_dev_stop(pi);
1825 if (rte_atomic16_cmpset(&(port->port_status),
1826 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1827 printf("Port %d can not be set into stopped\n", pi);
1828 need_check_link_status = 1;
1830 if (need_check_link_status && !no_link_check)
1831 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * close_port() - close one port, or all ports when @pid == RTE_PORT_ALL.
 * Flushes the port's flow rules before closing.  A port must be stopped
 * first; forwarding-active ports and bonding slaves are refused.
 * NOTE(review): locals (pi), continue statements and closing braces are
 * not visible in this excerpt.
 */
1837 close_port(portid_t pid)
1840 struct rte_port *port;
1842 if (port_id_is_invalid(pid, ENABLED_WARN))
1845 printf("Closing ports...\n");
1847 RTE_ETH_FOREACH_DEV(pi) {
1848 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1851 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1852 printf("Please remove port %d from forwarding configuration.\n", pi);
1856 if (port_is_bonding_slave(pi)) {
1857 printf("Please remove port %d from bonded device.\n", pi);
/* cmpset(CLOSED, CLOSED) is a race-free "is already closed" test. */
1862 if (rte_atomic16_cmpset(&(port->port_status),
1863 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1864 printf("Port %d is already closed\n", pi);
1868 if (rte_atomic16_cmpset(&(port->port_status),
1869 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1870 printf("Port %d is now not stopped\n", pi);
/* Remove installed rte_flow rules before releasing the device. */
1874 if (port->flow_list)
1875 port_flow_flush(pi);
1876 rte_eth_dev_close(pi);
1878 if (rte_atomic16_cmpset(&(port->port_status),
1879 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1880 printf("Port %d cannot be set to closed\n", pi);
/*
 * reset_port() - issue rte_eth_dev_reset() on one port, or all ports when
 * @pid == RTE_PORT_ALL.  On success the port is flagged for full device
 * and queue reconfiguration at the next start.
 * NOTE(review): locals (pi, diag), continue statements and closing braces
 * are not visible in this excerpt.
 */
1887 reset_port(portid_t pid)
1891 struct rte_port *port;
1893 if (port_id_is_invalid(pid, ENABLED_WARN))
1896 printf("Resetting ports...\n");
1898 RTE_ETH_FOREACH_DEV(pi) {
1899 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1902 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1903 printf("Please remove port %d from forwarding "
1904 "configuration.\n", pi);
1908 if (port_is_bonding_slave(pi)) {
1909 printf("Please remove port %d from bonded device.\n",
1914 diag = rte_eth_dev_reset(pi);
/* After a successful reset the port must be fully reconfigured. */
1917 port->need_reconfig = 1;
1918 port->need_reconfig_queues = 1;
1920 printf("Failed to reset port %d. diag=%d\n", pi, diag);
1928 eth_dev_event_callback_register(void)
1932 /* register the device event callback */
1933 ret = rte_dev_event_callback_register(NULL,
1934 eth_dev_event_callback, NULL);
1936 printf("Failed to register device event callback\n");
1945 eth_dev_event_callback_unregister(void)
1949 /* unregister the device event callback */
1950 ret = rte_dev_event_callback_unregister(NULL,
1951 eth_dev_event_callback, NULL);
1953 printf("Failed to unregister device event callback\n");
/*
 * attach_port() - hot-plug a new device identified by @identifier
 * (PCI address or virtual device args), configure it, enable promiscuous
 * mode and add it to the forwarding ports list.
 * NOTE(review): the local pi declaration, return statements and closing
 * braces are not visible in this excerpt.
 */
1961 attach_port(char *identifier)
1964 unsigned int socket_id;
1966 printf("Attaching a new port...\n");
1968 if (identifier == NULL) {
1969 printf("Invalid parameters are specified\n");
/* rte_eth_dev_attach() fills pi with the new port id on success. */
1973 if (rte_eth_dev_attach(identifier, &pi))
1976 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1977 /* if socket_id is invalid, set to 0 */
1978 if (check_socket_id(socket_id) < 0)
/* Apply testpmd's default configuration to the new port. */
1980 reconfig(pi, socket_id);
1981 rte_eth_promiscuous_enable(pi);
1983 nb_ports = rte_eth_dev_count_avail();
1985 ports[pi].port_status = RTE_PORT_STOPPED;
/* Add the new port id to the forwarding configuration. */
1987 update_fwd_ports(pi);
1989 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * detach_port() - hot-unplug @port_id.  The port must be closed first;
 * any remaining flow rules are flushed before detaching, and the
 * forwarding ports list is compacted afterwards.
 * NOTE(review): return statements and closing braces are not visible in
 * this excerpt.
 */
1994 detach_port(portid_t port_id)
1996 char name[RTE_ETH_NAME_MAX_LEN];
1998 printf("Detaching a port...\n");
2000 if (!port_is_closed(port_id)) {
2001 printf("Please close port first\n");
2005 if (ports[port_id].flow_list)
2006 port_flow_flush(port_id);
2008 if (rte_eth_dev_detach(port_id, name)) {
2009 TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2013 nb_ports = rte_eth_dev_count_avail();
/* RTE_MAX_ETHPORTS means "no new port": just drop invalid entries. */
2015 update_fwd_ports(RTE_MAX_ETHPORTS);
2017 printf("Port %u is detached. Now total ports is %d\n",
/*
 * NOTE(review): this is the body of pmd_test_exit() — its signature and
 * several statements (port close loop body, braces) are not visible in
 * this excerpt.  It stops forwarding, shuts down all ports, stops the
 * device event monitor and unregisters event callbacks before exit.
 */
2026 struct rte_device *device;
2031 stop_packet_forwarding();
2033 if (ports != NULL) {
2035 RTE_ETH_FOREACH_DEV(pt_id) {
2036 printf("\nShutting down port %d...\n", pt_id);
2042 * This is a workaround to fix a virtio-user issue that
2043 * requires to call clean-up routine to remove existing
2045 * This workaround valid only for testpmd, needs a fix
2046 * valid for all applications.
2047 * TODO: Implement proper resource cleanup
2049 device = rte_eth_devices[pt_id].device;
2050 if (device && !strcmp(device->driver->name, "net_virtio_user"))
/* Tear down hot-plug monitoring before exiting. */
2056 ret = rte_dev_event_monitor_stop();
2059 "fail to stop device event monitor.");
2061 ret = eth_dev_event_callback_unregister();
2064 "fail to unregister all event callbacks.");
2067 printf("\nBye...\n");
/* Signature of a PMD test menu command handler. */
2070 typedef void (*cmd_func_t)(void);
/* One entry of the PMD test menu: command name plus its handler. */
2071 struct pmd_test_command {
2072 const char *cmd_name;
2073 cmd_func_t cmd_func;
/* Number of entries in the pmd_test_menu[] table. */
2076 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2078 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * Polls link state every CHECK_INTERVAL ms for the ports selected by
 * @port_mask; once every selected port is up (or the 9s budget is spent)
 * the final per-port link status is printed.
 * NOTE(review): the portid declaration, some flag updates and closing
 * braces are not visible in this excerpt.
 */
2080 check_all_ports_link_status(uint32_t port_mask)
2082 #define CHECK_INTERVAL 100 /* 100ms */
2083 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2085 uint8_t count, all_ports_up, print_flag = 0;
2086 struct rte_eth_link link;
2088 printf("Checking link statuses...\n");
2090 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2092 RTE_ETH_FOREACH_DEV(portid) {
2093 if ((port_mask & (1 << portid)) == 0)
2095 memset(&link, 0, sizeof(link));
/* Non-blocking query: do not wait for the PMD to settle. */
2096 rte_eth_link_get_nowait(portid, &link);
2097 /* print link status if flag set */
2098 if (print_flag == 1) {
2099 if (link.link_status)
2101 "Port%d Link Up. speed %u Mbps- %s\n",
2102 portid, link.link_speed,
2103 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2104 ("full-duplex") : ("half-duplex\n"));
2106 printf("Port %d Link Down\n", portid);
2109 /* clear all_ports_up flag if any link down */
2110 if (link.link_status == ETH_LINK_DOWN) {
2115 /* after finally printing all link status, get out */
2116 if (print_flag == 1)
2119 if (all_ports_up == 0) {
2121 rte_delay_ms(CHECK_INTERVAL);
2124 /* set the print_flag if all ports up or timeout */
2125 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * rmv_event_callback() - deferred handler (run from an EAL alarm) for a
 * device-removal interrupt: stop forwarding if the removed port was in
 * use, close and detach it, then resume forwarding if it had been
 * stopped here.  Link checks are suppressed while tearing down.
 * NOTE(review): some statements and braces are not visible in this
 * excerpt.
 */
2135 rmv_event_callback(void *arg)
2137 int need_to_start = 0;
2138 int org_no_link_check = no_link_check;
/* The port id travels through the void* alarm argument. */
2139 portid_t port_id = (intptr_t)arg;
2141 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2143 if (!test_done && port_is_forwarding(port_id)) {
2145 stop_packet_forwarding();
2149 no_link_check = org_no_link_check;
2150 close_port(port_id);
2151 detach_port(port_id);
2153 start_packet_forwarding(0);
2156 /* This function is used by the interrupt thread */
/*
 * eth_event_callback() - ethdev event handler registered for all ports.
 * Prints events enabled in event_print_mask and, for removal interrupts,
 * schedules rmv_event_callback() via a 100ms EAL alarm (device teardown
 * must not run in the interrupt thread itself).
 * NOTE(review): the function signature's trailing parameter and some
 * braces/returns are not visible in this excerpt.
 */
2158 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
/* Human-readable names indexed by event type, for the log line below. */
2161 static const char * const event_desc[] = {
2162 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2163 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2164 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2165 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2166 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2167 [RTE_ETH_EVENT_IPSEC] = "IPsec",
2168 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2169 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2170 [RTE_ETH_EVENT_NEW] = "device probed",
2171 [RTE_ETH_EVENT_DESTROY] = "device released",
2172 [RTE_ETH_EVENT_MAX] = NULL,
2175 RTE_SET_USED(param);
2176 RTE_SET_USED(ret_param);
2178 if (type >= RTE_ETH_EVENT_MAX) {
2179 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
2180 port_id, __func__, type);
2182 } else if (event_print_mask & (UINT32_C(1) << type)) {
2183 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2188 if (port_id_is_invalid(port_id, DISABLED_WARN))
2192 case RTE_ETH_EVENT_INTR_RMV:
/* Defer removal handling out of the interrupt thread (100ms alarm). */
2193 if (rte_eal_alarm_set(100000,
2194 rmv_event_callback, (void *)(intptr_t)port_id))
2195 fprintf(stderr, "Could not set up deferred device removal\n");
2203 /* This function is used by the interrupt thread */
/*
 * eth_dev_event_callback() - EAL hot-plug device event handler.  Only
 * logs add/remove events for now; the actual failure handling and
 * attach-on-add flows are future work (see TODOs below).
 * NOTE(review): return statements and braces are not visible in this
 * excerpt.
 */
2205 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2206 __rte_unused void *arg)
2208 if (type >= RTE_DEV_EVENT_MAX) {
2209 fprintf(stderr, "%s called upon invalid event %d\n",
2215 case RTE_DEV_EVENT_REMOVE:
2216 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2218 /* TODO: After finish failure handle, begin to stop
2219 * packet forward, stop port, close port, detach port.
2222 case RTE_DEV_EVENT_ADD:
2223 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2225 /* TODO: After finish kernel driver binding,
2226 * begin to attach port.
/*
 * Apply the user-supplied TX queue -> stats-counter mappings that target
 * @port_id, and flag the port when at least one mapping was installed.
 * NOTE(review): locals (i, diag), error handling for diag and the return
 * statements are not visible in this excerpt.
 */
2235 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2239 uint8_t mapping_found = 0;
2241 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
/* Only apply entries for this port whose queue id is in range. */
2242 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2243 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2244 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2245 tx_queue_stats_mappings[i].queue_id,
2246 tx_queue_stats_mappings[i].stats_counter_id);
2253 port->tx_queue_stats_mapping_enabled = 1;
/*
 * Apply the user-supplied RX queue -> stats-counter mappings that target
 * @port_id, and flag the port when at least one mapping was installed.
 * NOTE(review): locals (i, diag), error handling for diag and the return
 * statements are not visible in this excerpt.
 */
2258 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2262 uint8_t mapping_found = 0;
2264 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
/* Only apply entries for this port whose queue id is in range. */
2265 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2266 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2267 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2268 rx_queue_stats_mappings[i].queue_id,
2269 rx_queue_stats_mappings[i].stats_counter_id);
2276 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Install both TX and RX queue stats mappings on port @pi.  -ENOTSUP from
 * the PMD is tolerated (the feature is simply disabled for the port);
 * any other error is fatal and aborts testpmd.
 */
2281 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2285 diag = set_tx_queue_stats_mapping_registers(pi, port);
2287 if (diag == -ENOTSUP) {
/* PMD does not support TX stats mapping: disable, do not abort. */
2288 port->tx_queue_stats_mapping_enabled = 0;
2289 printf("TX queue stats mapping not supported port id=%d\n", pi);
2292 rte_exit(EXIT_FAILURE,
2293 "set_tx_queue_stats_mapping_registers "
2294 "failed for port id=%d diag=%d\n",
2298 diag = set_rx_queue_stats_mapping_registers(pi, port);
2300 if (diag == -ENOTSUP) {
/* Same tolerance for unsupported RX stats mapping. */
2301 port->rx_queue_stats_mapping_enabled = 0;
2302 printf("RX queue stats mapping not supported port id=%d\n", pi);
2305 rte_exit(EXIT_FAILURE,
2306 "set_rx_queue_stats_mapping_registers "
2307 "failed for port id=%d diag=%d\n",
/*
 * rxtx_port_config() - initialize every RX/TX queue configuration of
 * @port from the PMD defaults, then overlay any threshold values the
 * user supplied on the command line (RTE_PMD_PARAM_UNSET means "keep the
 * PMD default").  Also sets the per-queue descriptor counts.
 */
2313 rxtx_port_config(struct rte_port *port)
2317 for (qid = 0; qid < nb_rxq; qid++) {
/* Start from the PMD's recommended RX queue configuration. */
2318 port->rx_conf[qid] = port->dev_info.default_rxconf;
2320 /* Check if any Rx parameters have been passed */
2321 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2322 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2324 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2325 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2327 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2328 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2330 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2331 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2333 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2334 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2336 port->nb_rx_desc[qid] = nb_rxd;
2339 for (qid = 0; qid < nb_txq; qid++) {
/* Start from the PMD's recommended TX queue configuration. */
2340 port->tx_conf[qid] = port->dev_info.default_txconf;
2342 /* Check if any Tx parameters have been passed */
2343 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2344 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2346 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2347 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2349 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2350 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2352 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2353 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2355 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2356 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2358 port->nb_tx_desc[qid] = nb_txd;
/*
 * init_port_config() - build the default device configuration for every
 * available port: fdir settings, RSS hash configuration, multi-queue RX
 * mode, per-queue RX/TX configuration, stats mappings and LSC/RMV
 * interrupt flags.
 * NOTE(review): some lines (the port pointer assignment, a conditional
 * around the RSS setup, closing braces) are not visible in this excerpt.
 */
2363 init_port_config(void)
2366 struct rte_port *port;
2368 RTE_ETH_FOREACH_DEV(pid) {
2370 port->dev_conf.fdir_conf = fdir_conf;
2371 rte_eth_dev_info_get(pid, &port->dev_info);
/* Restrict the requested RSS hash types to what the PMD supports. */
2373 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2374 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2375 rss_hf & port->dev_info.flow_type_rss_offloads;
2377 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2378 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* Without DCB: enable RSS multi-queue RX only when a hash is set. */
2381 if (port->dcb_flag == 0) {
2382 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2383 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2385 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2388 rxtx_port_config(port);
2390 rte_eth_macaddr_get(pid, &port->eth_addr);
2392 map_port_queue_stats_mapping_registers(pid, port);
2393 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2394 rte_pmd_ixgbe_bypass_init(pid);
/* Only request LSC/RMV interrupts when the device advertises them. */
2397 if (lsc_interrupt &&
2398 (rte_eth_devices[pid].data->dev_flags &
2399 RTE_ETH_DEV_INTR_LSC))
2400 port->dev_conf.intr_conf.lsc = 1;
2401 if (rmv_interrupt &&
2402 (rte_eth_devices[pid].data->dev_flags &
2403 RTE_ETH_DEV_INTR_RMV))
2404 port->dev_conf.intr_conf.rmv = 1;
2408 void set_port_slave_flag(portid_t slave_pid)
2410 struct rte_port *port;
2412 port = &ports[slave_pid];
2413 port->slave_flag = 1;
2416 void clear_port_slave_flag(portid_t slave_pid)
2418 struct rte_port *port;
2420 port = &ports[slave_pid];
2421 port->slave_flag = 0;
2424 uint8_t port_is_bonding_slave(portid_t slave_pid)
2426 struct rte_port *port;
2428 port = &ports[slave_pid];
2429 if ((rte_eth_devices[slave_pid].data->dev_flags &
2430 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
/* VLAN ids programmed into the VMDQ+DCB pool map by get_eth_dcb_conf()
 * (one tag per RX pool). */
2435 const uint16_t vlan_tags[] = {
2436 0, 1, 2, 3, 4, 5, 6, 7,
2437 8, 9, 10, 11, 12, 13, 14, 15,
2438 16, 17, 18, 19, 20, 21, 22, 23,
2439 24, 25, 26, 27, 28, 29, 30, 31
/*
 * get_eth_dcb_conf() - fill @eth_conf for DCB operation, either combined
 * with virtualization (DCB_VT_ENABLED: VMDQ+DCB pools mapped from the
 * vlan_tags[] table) or plain DCB (+RSS on RX).  @num_tcs selects 4 or 8
 * traffic classes; @pfc_en additionally enables priority flow control.
 * NOTE(review): the loop variable declaration, the pfc_en conditional and
 * the return statement are not visible in this excerpt.
 */
2443 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2444 enum dcb_mode_enable dcb_mode,
2445 enum rte_eth_nb_tcs num_tcs,
2451 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2452 * given above, and the number of traffic classes available for use.
2454 if (dcb_mode == DCB_VT_ENABLED) {
2455 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2456 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2457 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2458 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2460 /* VMDQ+DCB RX and TX configurations */
2461 vmdq_rx_conf->enable_default_pool = 0;
2462 vmdq_rx_conf->default_pool = 0;
/* 4 TCs leave room for 32 pools; 8 TCs only for 16. */
2463 vmdq_rx_conf->nb_queue_pools =
2464 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2465 vmdq_tx_conf->nb_queue_pools =
2466 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* Map one VLAN tag to each pool, round-robin over the pools. */
2468 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2469 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2470 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2471 vmdq_rx_conf->pool_map[i].pools =
2472 1 << (i % vmdq_rx_conf->nb_queue_pools);
/* Spread user priorities cyclically over the traffic classes. */
2474 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2475 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2476 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2479 /* set DCB mode of RX and TX of multiple queues */
2480 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2481 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2483 struct rte_eth_dcb_rx_conf *rx_conf =
2484 &eth_conf->rx_adv_conf.dcb_rx_conf;
2485 struct rte_eth_dcb_tx_conf *tx_conf =
2486 &eth_conf->tx_adv_conf.dcb_tx_conf;
2488 rx_conf->nb_tcs = num_tcs;
2489 tx_conf->nb_tcs = num_tcs;
2491 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2492 rx_conf->dcb_tc[i] = i % num_tcs;
2493 tx_conf->dcb_tc[i] = i % num_tcs;
/* Plain DCB combines with RSS on the RX side. */
2495 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2496 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2497 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2501 eth_conf->dcb_capability_en =
2502 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2504 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * init_port_dcb_config() - reconfigure port @pid for DCB operation:
 * build the DCB rte_eth_conf, re-run rte_eth_dev_configure(), size
 * nb_rxq/nb_txq for the selected mode, enable VLAN filtering and program
 * the vlan_tags[] filter table.
 * NOTE(review): locals (retval, i), several return paths and closing
 * braces are not visible in this excerpt.
 */
2510 init_port_dcb_config(portid_t pid,
2511 enum dcb_mode_enable dcb_mode,
2512 enum rte_eth_nb_tcs num_tcs,
2515 struct rte_eth_conf port_conf;
2516 struct rte_port *rte_port;
2520 rte_port = &ports[pid];
2522 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2523 /* Enter DCB configuration status */
/* Preserve the current rxmode/txmode as the baseline. */
2526 port_conf.rxmode = rte_port->dev_conf.rxmode;
2527 port_conf.txmode = rte_port->dev_conf.txmode;
2529 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2530 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2533 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2535 /* re-configure the device . */
2536 rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
2538 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2540 /* If dev_info.vmdq_pool_base is greater than 0,
2541 * the queue id of vmdq pools is started after pf queues.
2543 if (dcb_mode == DCB_VT_ENABLED &&
2544 rte_port->dev_info.vmdq_pool_base > 0) {
2545 printf("VMDQ_DCB multi-queue mode is nonsensical"
2546 " for port %d.", pid);
2550 /* Assume the ports in testpmd have the same dcb capability
2551 * and has the same number of rxq and txq in dcb mode
2553 if (dcb_mode == DCB_VT_ENABLED) {
2554 if (rte_port->dev_info.max_vfs > 0) {
2555 nb_rxq = rte_port->dev_info.nb_rx_queues;
2556 nb_txq = rte_port->dev_info.nb_tx_queues;
2558 nb_rxq = rte_port->dev_info.max_rx_queues;
2559 nb_txq = rte_port->dev_info.max_tx_queues;
2562 /*if vt is disabled, use all pf queues */
2563 if (rte_port->dev_info.vmdq_pool_base == 0) {
2564 nb_rxq = rte_port->dev_info.max_rx_queues;
2565 nb_txq = rte_port->dev_info.max_tx_queues;
2567 nb_rxq = (queueid_t)num_tcs;
2568 nb_txq = (queueid_t)num_tcs;
2572 rx_free_thresh = 64;
2574 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2576 rxtx_port_config(rte_port);
/* Accept only the VLAN ids used by the DCB pool mapping. */
2578 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2579 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2580 rx_vft_set(pid, vlan_tags[i], 1);
2582 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2583 map_port_queue_stats_mapping_registers(pid, rte_port);
2585 rte_port->dcb_flag = 1;
/*
 * NOTE(review): this is the body of init_port() — its signature is not
 * visible in this excerpt.  It allocates the global, cache-aligned and
 * zeroed ports[] array for all possible ethdev ports; allocation failure
 * is fatal.
 */
2593 /* Configuration of Ethernet ports. */
2594 ports = rte_zmalloc("testpmd: ports",
2595 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2596 RTE_CACHE_LINE_SIZE);
2597 if (ports == NULL) {
2598 rte_exit(EXIT_FAILURE,
2599 "rte_zmalloc(%d struct rte_port) failed\n",
/*
 * NOTE(review): this is the body of print_stats() — its signature is not
 * visible in this excerpt.  It clears the terminal via ANSI escape
 * sequences and prints NIC statistics for every forwarding port.
 */
2615 const char clr[] = { 27, '[', '2', 'J', '\0' };
2616 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2618 /* Clear screen and move to top left */
2619 printf("%s%s", clr, top_left);
2621 printf("\nPort statistics ====================================");
2622 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2623 nic_stats_display(fwd_ports_ids[i]);
/*
 * signal_handler() - SIGINT/SIGTERM handler: uninitialize optional
 * pdump/latencystats subsystems, set the force-quit flag for the main
 * loop, then re-raise the signal with the default disposition so the
 * process exits with the conventional signal status.
 */
2627 signal_handler(int signum)
2629 if (signum == SIGINT || signum == SIGTERM) {
2630 printf("\nSignal %d received, preparing to exit...\n",
2632 #ifdef RTE_LIBRTE_PDUMP
2633 /* uninitialize packet capture framework */
2636 #ifdef RTE_LIBRTE_LATENCY_STATS
2637 rte_latencystats_uninit();
2640 /* Set flag to indicate the force termination. */
2642 /* exit with the expected status */
/* Restore default handling and re-raise so the exit status reflects
 * the signal. */
2643 signal(signum, SIG_DFL);
2644 kill(getpid(), signum);
2649 main(int argc, char** argv)
2655 signal(SIGINT, signal_handler);
2656 signal(SIGTERM, signal_handler);
2658 diag = rte_eal_init(argc, argv);
2660 rte_panic("Cannot init EAL\n");
2662 testpmd_logtype = rte_log_register("testpmd");
2663 if (testpmd_logtype < 0)
2664 rte_panic("Cannot register log type");
2665 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2667 #ifdef RTE_LIBRTE_PDUMP
2668 /* initialize packet capture framework */
2669 rte_pdump_init(NULL);
2672 nb_ports = (portid_t) rte_eth_dev_count_avail();
2674 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2676 /* allocate port structures, and init them */
2679 set_def_fwd_config();
2681 rte_panic("Empty set of forwarding logical cores - check the "
2682 "core mask supplied in the command parameters\n");
2684 /* Bitrate/latency stats disabled by default */
2685 #ifdef RTE_LIBRTE_BITRATE
2686 bitrate_enabled = 0;
2688 #ifdef RTE_LIBRTE_LATENCY_STATS
2689 latencystats_enabled = 0;
2692 /* on FreeBSD, mlockall() is disabled by default */
2693 #ifdef RTE_EXEC_ENV_BSDAPP
2702 launch_args_parse(argc, argv);
2704 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
2705 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2709 if (tx_first && interactive)
2710 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2711 "interactive mode.\n");
2713 if (tx_first && lsc_interrupt) {
2714 printf("Warning: lsc_interrupt needs to be off when "
2715 " using tx_first. Disabling.\n");
2719 if (!nb_rxq && !nb_txq)
2720 printf("Warning: Either rx or tx queues should be non-zero\n");
2722 if (nb_rxq > 1 && nb_rxq > nb_txq)
2723 printf("Warning: nb_rxq=%d enables RSS configuration, "
2724 "but nb_txq=%d will prevent to fully test it.\n",
2730 /* enable hot plug monitoring */
2731 ret = rte_dev_event_monitor_start();
2736 eth_dev_event_callback_register();
2740 if (start_port(RTE_PORT_ALL) != 0)
2741 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2743 /* set all ports to promiscuous mode by default */
2744 RTE_ETH_FOREACH_DEV(port_id)
2745 rte_eth_promiscuous_enable(port_id);
2747 /* Init metrics library */
2748 rte_metrics_init(rte_socket_id());
2750 #ifdef RTE_LIBRTE_LATENCY_STATS
2751 if (latencystats_enabled != 0) {
2752 int ret = rte_latencystats_init(1, NULL);
2754 printf("Warning: latencystats init()"
2755 " returned error %d\n", ret);
2756 printf("Latencystats running on lcore %d\n",
2757 latencystats_lcore_id);
2761 /* Setup bitrate stats */
2762 #ifdef RTE_LIBRTE_BITRATE
2763 if (bitrate_enabled != 0) {
2764 bitrate_data = rte_stats_bitrate_create();
2765 if (bitrate_data == NULL)
2766 rte_exit(EXIT_FAILURE,
2767 "Could not allocate bitrate data.\n");
2768 rte_stats_bitrate_reg(bitrate_data);
2772 #ifdef RTE_LIBRTE_CMDLINE
2773 if (strlen(cmdline_filename) != 0)
2774 cmdline_read_from_file(cmdline_filename);
2776 if (interactive == 1) {
2778 printf("Start automatic packet forwarding\n");
2779 start_packet_forwarding(0);
2791 printf("No commandline core given, start packet forwarding\n");
2792 start_packet_forwarding(tx_first);
2793 if (stats_period != 0) {
2794 uint64_t prev_time = 0, cur_time, diff_time = 0;
2795 uint64_t timer_period;
2797 /* Convert to number of cycles */
2798 timer_period = stats_period * rte_get_timer_hz();
2800 while (f_quit == 0) {
2801 cur_time = rte_get_timer_cycles();
2802 diff_time += cur_time - prev_time;
2804 if (diff_time >= timer_period) {
2806 /* Reset the timer */
2809 /* Sleep to avoid unnecessary checks */
2810 prev_time = cur_time;
2815 printf("Press enter to exit\n");
2816 rc = read(0, &c, 1);