/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;
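/*
 * Illustrative note (sketch, not part of this file): in NUMA mode the
 * placement above is typically driven from the testpmd command line, e.g.
 *
 *   testpmd -l 0-3 -n 4 -- --numa \
 *           --port-numa-config="(0,0),(1,1)" \
 *           --ring-numa-config="(0,1,0),(1,3,1)"
 *
 * where each (port,socket) pair places that port's mbuf pool and each
 * (port,flag,socket) triple places its rings; the tuple syntax and the
 * flag meaning (1 = RX, 2 = TX, 3 = both) are assumptions based on
 * testpmd's documented parameters, which this file only references in a
 * warning message below.
 */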
/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Store the socket on which the memory pool used by each port is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the socket on which the RX ring used by each port is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the socket on which the TX ring used by each port is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
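/*
 * Worked example of the invariants above: with 2 probed ports and
 * nb_rxq = 4, nb_fwd_streams = 2 * 4 = 8 streams are created, which are
 * then distributed among the nb_fwd_lcores forwarding cores.
 */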
/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flowgen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, a process started with the 'stats-period' option cannot
 * be terminated from the console. Set a flag to exit the stats-period loop
 * once SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB test mode is active. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting to forward.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
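/*
 * Sketch of how a specific event is tested against the mask (the same bit
 * layout is used by eth_event_callback() below):
 *
 *   if (event_print_mask & (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC))
 *       printf("LSC events are displayed\n");
 *
 * RTE_ETH_EVENT_VF_MBOX is deliberately absent from the default mask.
 */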
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled in the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
	.ignore_offload_bitfield = 1,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xEFFF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void eth_dev_event_callback(char *device_name,
				   enum rte_dev_event_type type,
				   void *param);
static int eth_dev_event_callback_register(void);
static int eth_dev_event_callback_unregister(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket has already been discovered.
 * Returns a positive value if the socket id is new, zero if it is
 * already known.
 */
static int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
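/*
 * Note on the two paths above: the anonymous-memory path builds the pool in
 * two steps (rte_mempool_create_empty() + rte_mempool_populate_anon(),
 * followed by explicit per-pool and per-object init), while the default path
 * lets rte_pktmbuf_pool_create() allocate and initialize everything in one
 * call using the preferred mempool ops.
 */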
/*
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues in all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the input rxq is valid.
 * It is valid if it does not exceed the maximum number of RX queues
 * of any port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq, allowed_max_rxq, pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues in all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the input txq is valid.
 * It is valid if it does not exceed the maximum number of TX queues
 * of any port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq, allowed_max_txq, pid);
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (!(port->dev_info.rx_offload_capa &
		      DEV_RX_OFFLOAD_CRC_STRIP))
			port->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_CRC_STRIP;
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}
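	/*
	 * Worked sizing example (illustrative figures, not from this file):
	 * with 4 lcores, mb_mempool_cache = 250 and descriptor maxima of
	 * 2048 each, the default per-pool estimate is
	 *   (2048 + 4 * 250 + 2048 + MAX_PKT_BURST) * RTE_MAX_ETHPORTS
	 * mbufs, i.e. enough for every port slot to fill its rings, caches
	 * and bursts even if queue sizes are changed later at run time.
	 */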
	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
static void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}

int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
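/*
 * Example of the display produced above: if 1000 bursts were recorded,
 * 90% of them with 32 packets and 8% with 16, the line reads
 *   RX-bursts : 1000 [90% of 32 pkts + 8% of 16 pkts + 2% of others]
 */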
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the do/while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer makes the loop
				 * exit after a 1 second timeout.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
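/*
 * Editorial note: the flush runs twice with a 10 ms pause so that packets
 * still in flight after the first drain pass can be caught by the second.
 */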
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t idx_port;

	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				RTE_ETH_FOREACH_DEV(idx_port)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Update the forward ports list.
 */
void
update_fwd_ports(portid_t new_pid)
{
	unsigned int i;
	unsigned int new_nb_fwd_ports = 0;
	int move = 0;

	for (i = 0; i < nb_fwd_ports; ++i) {
		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
			move = 1;
		else if (move)
			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
		else
			new_nb_fwd_ports++;
	}
	if (new_pid < RTE_MAX_ETHPORTS)
		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;

	nb_fwd_ports = new_nb_fwd_ports;
	nb_cfg_ports = new_nb_fwd_ports;
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq is 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "must be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
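/*
 * Usage note: with_tx_first comes from testpmd's "start tx_first [n]"
 * command (the command name is from testpmd's documented CLI, not defined
 * in this file); each iteration of the while loop above sends one TXONLY
 * burst on every forwarding core before the configured engine takes over.
 */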
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t pt_id;
	streamid_t sm_id;
	lcoreid_t lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				port->tx_conf[qi].txq_flags =
					ETH_TXQ_FLAGS_IGNORE;
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							" No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						port->nb_rx_desc[qi],
						rxring_numa[pi],
						&(port->rx_conf[qi]),
						mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							" No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						port->nb_rx_desc[qi],
						port->socket_id,
						&(port->rx_conf[qi]),
						mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	for (event_type = RTE_ETH_EVENT_UNKNOWN;
	     event_type < RTE_ETH_EVENT_MAX;
	     event_type++) {
		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
						event_type,
						eth_event_callback,
						NULL);
		if (diag) {
			printf("Failed to register event callback for event %d\n",
				event_type);
			return -1;
		}
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}

void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
		}
	}

	printf("Done\n");
}

static int
eth_dev_event_callback_register(void)
{
	int ret;

	/* register the device event callback */
	ret = rte_dev_event_callback_register(NULL,
		eth_dev_event_callback, NULL);
	if (ret) {
		printf("Failed to register device event callback\n");
		return -1;
	}

	return 0;
}

static int
eth_dev_event_callback_unregister(void)
{
	int ret;

	/* unregister the device event callback */
	ret = rte_dev_event_callback_unregister(NULL,
		eth_dev_event_callback, NULL);
	if (ret < 0) {
		printf("Failed to unregister device event callback\n");
		return -1;
	}

	return 0;
}

void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count_avail();

	ports[pi].port_status = RTE_PORT_STOPPED;

	update_fwd_ports(pi);

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}

void
detach_port(portid_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name)) {
		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
		return;
	}

	nb_ports = rte_eth_dev_count_avail();

	update_fwd_ports(RTE_MAX_ETHPORTS);

	printf("Port %u is detached. Now total ports is %d\n",
			port_id, nb_ports);
	printf("Done\n");
	return;
}

void
pmd_test_exit(void)
{
	struct rte_device *device;
	portid_t pt_id;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);

			/*
			 * This is a workaround to fix a virtio-user issue
			 * that requires calling the clean-up routine to
			 * remove existing virtio-user files.
			 * This workaround is valid only for testpmd; a fix
			 * valid for all applications is needed.
			 * TODO: Implement proper resource cleanup
			 */
			device = rte_eth_devices[pt_id].device;
			if (device && !strcmp(device->driver->name, "net_virtio_user"))
				detach_port(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret)
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.");

		ret = eth_dev_event_callback_unregister();
		if (ret)
			RTE_LOG(ERR, EAL,
				"fail to unregister all event callbacks.");
	}

	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))

/* Check the link status of all ports in up to 9s, and print the final status */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port%d Link Up. speed %u Mbps - %s\n",
					portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}

static void
rmv_event_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	detach_port(port_id);
	if (need_to_start)
		start_packet_forwarding(0);
}

/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		  void *ret_param)
{
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_IPSEC] = "IPsec",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_NEW] = "device probed",
		[RTE_ETH_EVENT_DESTROY] = "device released",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			event_desc[type]);
		fflush(stdout);
	}

	if (port_id_is_invalid(port_id, DISABLED_WARN))
		return 0;

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}

/* This function is used by the interrupt thread */
static void
eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
			     __rte_unused void *arg)
{
	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
			device_name);
		/* TODO: once failure handling is finished, stop packet
		 * forwarding, then stop, close and detach the port.
		 */
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: once kernel driver binding is finished,
		 * begin to attach the port.
		 */
		break;
	default:
		break;
	}
}
static int
set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}

static void
rxtx_port_config(struct rte_port *port)
{
	uint16_t qid;

	for (qid = 0; qid < nb_rxq; qid++) {
		port->rx_conf[qid] = port->dev_info.default_rxconf;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		port->tx_conf[qid] = port->dev_info.default_txconf;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}
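/*
 * Only parameters explicitly supplied on the command line override the PMD
 * defaults above: RTE_PMD_PARAM_UNSET (-1) marks "not supplied", so the
 * default_rxconf/default_txconf values obtained from rte_eth_dev_info_get()
 * survive untouched otherwise.
 */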
2354 init_port_config(void)
2357 struct rte_port *port;
2359 RTE_ETH_FOREACH_DEV(pid) {
2361 port->dev_conf.fdir_conf = fdir_conf;
2362 rte_eth_dev_info_get(pid, &port->dev_info);
2364 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2365 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2366 rss_hf & port->dev_info.flow_type_rss_offloads;
2368 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2369 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2372 if (port->dcb_flag == 0) {
2373 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2374 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2376 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2379 rxtx_port_config(port);
2381 rte_eth_macaddr_get(pid, &port->eth_addr);
2383 map_port_queue_stats_mapping_registers(pid, port);
2384 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2385 rte_pmd_ixgbe_bypass_init(pid);
2388 if (lsc_interrupt &&
2389 (rte_eth_devices[pid].data->dev_flags &
2390 RTE_ETH_DEV_INTR_LSC))
2391 port->dev_conf.intr_conf.lsc = 1;
2392 if (rmv_interrupt &&
2393 (rte_eth_devices[pid].data->dev_flags &
2394 RTE_ETH_DEV_INTR_RMV))
2395 port->dev_conf.intr_conf.rmv = 1;
2397 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2398 /* Detect softnic port */
2399 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2400 port->softnic_enable = 1;
2401 memset(&port->softport, 0, sizeof(struct softnic_port));
2403 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2404 port->softport.tm_flag = 1;
2410 void set_port_slave_flag(portid_t slave_pid)
2412 struct rte_port *port;
2414 port = &ports[slave_pid];
2415 port->slave_flag = 1;
2418 void clear_port_slave_flag(portid_t slave_pid)
2420 struct rte_port *port;
2422 port = &ports[slave_pid];
2423 port->slave_flag = 0;
2426 uint8_t port_is_bonding_slave(portid_t slave_pid)
2428 struct rte_port *port;
2430 port = &ports[slave_pid];
2431 if ((rte_eth_devices[slave_pid].data->dev_flags &
2432 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2437 const uint16_t vlan_tags[] = {
2438 0, 1, 2, 3, 4, 5, 6, 7,
2439 8, 9, 10, 11, 12, 13, 14, 15,
2440 16, 17, 18, 19, 20, 21, 22, 23,
2441 24, 25, 26, 27, 28, 29, 30, 31
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
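/*
 * Illustrative sketch, not part of the build (#if 0): what get_eth_dcb_conf()
 * yields for DCB_VT_ENABLED with ETH_4_TCS. Four TCs select ETH_32_POOLS, so
 * each of the 32 vlan_tags[] entries maps to its own pool (1 << (i % 32)),
 * and the 8 user priorities are spread over the TCs as i % 4. The function
 * name below is illustrative only.
 */
#if 0
static void
example_dcb_vt_conf(void)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	if (get_eth_dcb_conf(&conf, DCB_VT_ENABLED, ETH_4_TCS, 1) == 0) {
		/* Here conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB and
		 * conf.dcb_capability_en includes ETH_DCB_PFC_SUPPORT. */
	}
}
#endif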
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	port_conf.rxmode = rte_port->dev_conf.rxmode;
	port_conf.txmode = rte_port->dev_conf.txmode;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/* re-configure the device */
	rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}
	/* Assume the ports in testpmd have the same dcb capability
	 * and the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
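/*
 * Illustrative sketch, not part of the build (#if 0): the shape of a caller
 * of init_port_dcb_config(), e.g. a command handler enabling DCB-VT with 4
 * traffic classes and PFC on. The function name below is illustrative only;
 * a real caller also verifies the port is stopped before reconfiguring it.
 */
#if 0
static void
example_enable_dcb(portid_t pid)
{
	/* A real caller checks that the port is stopped before this. */
	if (init_port_dcb_config(pid, DCB_VT_ENABLED, ETH_4_TCS, 1) != 0)
		printf("Cannot init DCB for port %d\n", pid);
}
#endif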
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
}
static void
print_stats(void)
{
	portid_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };	/* "\x1b[2J" */
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };	/* "\x1b[1;1H" */

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count_avail();
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");
	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_BSDAPP
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}
	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				"using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}
	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		/* enable hot plug monitoring */
		ret = rte_dev_event_monitor_start();
		if (ret) {
			rte_errno = EINVAL;
			return -1;
		}
		eth_dev_event_callback_register();
	}
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());
#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif
	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif
#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();
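			/*
			 * Worked example: with stats_period == 2 and a 2.5 GHz
			 * timer, timer_period = 2 * 2500000000 = 5000000000
			 * cycles, i.e. stats are printed every two seconds.
			 */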
			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}
2817 printf("Press enter to exit\n");
2818 rc = read(0, &c, 1);