1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
16 #include <sys/queue.h>
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
41 #include <rte_mbuf_pool_ops.h>
42 #include <rte_interrupts.h>
44 #include <rte_ether.h>
45 #include <rte_ethdev.h>
47 #include <rte_string_fns.h>
48 #ifdef RTE_LIBRTE_IXGBE_PMD
49 #include <rte_pmd_ixgbe.h>
51 #ifdef RTE_LIBRTE_PDUMP
52 #include <rte_pdump.h>
55 #include <rte_metrics.h>
56 #ifdef RTE_LIBRTE_BITRATE
57 #include <rte_bitrate.h>
59 #ifdef RTE_LIBRTE_LATENCY_STATS
60 #include <rte_latencystats.h>
65 uint16_t verbose_level = 0; /**< Silent by default. */
66 int testpmd_logtype; /**< Log type for testpmd logs */
68 /* Use the master core for the command line? */
69 uint8_t interactive = 0;
70 uint8_t auto_start = 0;
72 char cmdline_filename[PATH_MAX] = {0};
75 * NUMA support configuration.
76 * When set, NUMA support dispatches the allocation of the
77 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
78 * probed ports, among CPU sockets 0 and 1.
79 * Otherwise, all memory is allocated from CPU socket 0.
81 uint8_t numa_support = 1; /**< numa enabled by default */
84 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
87 uint8_t socket_num = UMA_NO_CONFIG;
90 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
95 * Store the specified sockets on which the memory pools used by the ports are allocated
98 uint8_t port_numa[RTE_MAX_ETHPORTS];
101 * Store the specified sockets on which the RX rings used by the ports are allocated
104 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
107 * Store the specified sockets on which the TX rings used by the ports are allocated
110 uint8_t txring_numa[RTE_MAX_ETHPORTS];
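/*
 * Note: the port_numa/rxring_numa/txring_numa arrays are normally filled in
 * from the --port-numa-config and --ring-numa-config command-line options
 * (see the warning printed in check_socket_id() below). A sketch of a
 * possible two-port layout, assuming the usual (port,socket) and
 * (port,flag,socket) tuple syntax:
 *
 *   testpmd -- --port-numa-config="(0,0),(1,1)" \
 *              --ring-numa-config="(0,3,0),(1,3,1)"
 *
 * The values above are only an illustration, not a prescribed setup.
 */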
113 * Record the Ethernet address of peer target ports to which packets are
115 * Must be instantiated with the ethernet addresses of peer traffic generator
118 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
119 portid_t nb_peer_eth_addrs = 0;
122 * Probed Target Environment.
124 struct rte_port *ports; /**< For all probed ethernet ports. */
125 portid_t nb_ports; /**< Number of probed ethernet ports. */
126 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
127 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
130 * Test Forwarding Configuration.
131 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
132 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
134 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
135 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
136 portid_t nb_cfg_ports; /**< Number of configured ports. */
137 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
139 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
140 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
142 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
143 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
146 * Forwarding engines.
148 struct fwd_engine * fwd_engines[] = {
157 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
159 &softnic_tm_bypass_engine,
161 #ifdef RTE_LIBRTE_IEEE1588
162 &ieee1588_fwd_engine,
167 struct fwd_config cur_fwd_config;
168 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
169 uint32_t retry_enabled;
170 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
171 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
173 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
174 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
175 * specified on command-line. */
176 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
179 * In a container, a process running with the 'stats-period' option cannot
180 * otherwise be terminated. Set a flag to exit the stats-period loop once SIGINT/SIGTERM is received.
185 * Configuration of packet segments used by the "txonly" processing engine.
187 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
188 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
189 TXONLY_DEF_PACKET_LEN,
191 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
193 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
194 /**< Split policy for packets to TX. */
196 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
197 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
199 /* Whether the current configuration is in DCB mode; 0 means it is not. */
200 uint8_t dcb_config = 0;
202 /* Whether DCB is in testing status */
203 uint8_t dcb_test = 0;
206 * Configurable number of RX/TX queues.
208 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
209 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
212 * Configurable number of RX/TX ring descriptors.
214 #define RTE_TEST_RX_DESC_DEFAULT 1024
215 #define RTE_TEST_TX_DESC_DEFAULT 1024
216 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
217 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
219 #define RTE_PMD_PARAM_UNSET -1
221 * Configurable values of RX and TX ring threshold registers.
224 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
228 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
233 * Configurable value of RX free threshold.
235 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
238 * Configurable value of RX drop enable.
240 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
243 * Configurable value of TX free threshold.
245 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
248 * Configurable value of TX RS bit threshold.
250 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
253 * Receive Side Scaling (RSS) configuration.
255 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258 * Port topology configuration
260 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263 * Avoid flushing all the RX streams before starting forwarding.
265 uint8_t no_flush_rx = 0; /* flush by default */
268 * Flow API isolated mode.
270 uint8_t flow_isolate_all;
273 * Avoid checking the link status when starting/stopping a port.
275 uint8_t no_link_check = 0; /* check by default */
278 * Enable link status change notification
280 uint8_t lsc_interrupt = 1; /* enabled by default */
283 * Enable device removal notification.
285 uint8_t rmv_interrupt = 1; /* enabled by default */
288 * Display or mask ether events
289 * Default to all events except VF_MBOX
291 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
292 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
293 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
294 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
295 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
296 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
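/*
 * eth_event_callback() prints an event only when the corresponding bit is
 * set in event_print_mask. For example, to also display VF mailbox events
 * the mask could be extended with:
 *
 *   event_print_mask |= UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX;
 */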
299 * NIC bypass mode configuration options.
302 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
303 /* The NIC bypass watchdog timeout. */
304 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
308 #ifdef RTE_LIBRTE_LATENCY_STATS
311 * Set when latency stats are enabled on the command line
313 uint8_t latencystats_enabled;
316 * Lcore ID to service latency statistics.
318 lcoreid_t latencystats_lcore_id = -1;
323 * Ethernet device configuration.
325 struct rte_eth_rxmode rx_mode = {
326 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
327 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
328 .ignore_offload_bitfield = 1,
331 struct rte_eth_txmode tx_mode = {
332 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
335 struct rte_fdir_conf fdir_conf = {
336 .mode = RTE_FDIR_MODE_NONE,
337 .pballoc = RTE_FDIR_PBALLOC_64K,
338 .status = RTE_FDIR_REPORT_STATUS,
340 .vlan_tci_mask = 0x0,
342 .src_ip = 0xFFFFFFFF,
343 .dst_ip = 0xFFFFFFFF,
346 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
347 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
349 .src_port_mask = 0xFFFF,
350 .dst_port_mask = 0xFFFF,
351 .mac_addr_byte_mask = 0xFF,
352 .tunnel_type_mask = 1,
353 .tunnel_id_mask = 0xFFFFFFFF,
358 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
360 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
361 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
363 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
364 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
366 uint16_t nb_tx_queue_stats_mappings = 0;
367 uint16_t nb_rx_queue_stats_mappings = 0;
370 * Display zero values by default for xstats
372 uint8_t xstats_hide_zero;
374 unsigned int num_sockets = 0;
375 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
377 #ifdef RTE_LIBRTE_BITRATE
378 /* Bitrate statistics */
379 struct rte_stats_bitrates *bitrate_data;
380 lcoreid_t bitrate_lcore_id;
381 uint8_t bitrate_enabled;
384 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
385 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
387 /* Forward function declarations */
388 static void map_port_queue_stats_mapping_registers(portid_t pi,
389 struct rte_port *port);
390 static void check_all_ports_link_status(uint32_t port_mask);
391 static int eth_event_callback(portid_t port_id,
392 enum rte_eth_event_type type,
393 void *param, void *ret_param);
396 * Check if all the ports are started.
397 * If yes, return positive value. If not, return zero.
399 static int all_ports_started(void);
401 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
402 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
405 * Helper function to check whether a socket id has already been discovered.
406 * Return a positive value if the socket id is new, zero if it is already known.
409 new_socket_id(unsigned int socket_id)
413 for (i = 0; i < num_sockets; i++) {
414 if (socket_ids[i] == socket_id)
421 * Setup default configuration.
424 set_default_fwd_lcores_config(void)
428 unsigned int sock_num;
431 for (i = 0; i < RTE_MAX_LCORE; i++) {
432 sock_num = rte_lcore_to_socket_id(i);
433 if (new_socket_id(sock_num)) {
434 if (num_sockets >= RTE_MAX_NUMA_NODES) {
435 rte_exit(EXIT_FAILURE,
436 "Total sockets greater than %u\n",
439 socket_ids[num_sockets++] = sock_num;
441 if (!rte_lcore_is_enabled(i))
443 if (i == rte_get_master_lcore())
445 fwd_lcores_cpuids[nb_lc++] = i;
447 nb_lcores = (lcoreid_t) nb_lc;
448 nb_cfg_lcores = nb_lcores;
453 set_def_peer_eth_addrs(void)
457 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
458 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
459 peer_eth_addrs[i].addr_bytes[5] = i;
464 set_default_fwd_ports_config(void)
469 RTE_ETH_FOREACH_DEV(pt_id)
470 fwd_ports_ids[i++] = pt_id;
472 nb_cfg_ports = nb_ports;
473 nb_fwd_ports = nb_ports;
477 set_def_fwd_config(void)
479 set_default_fwd_lcores_config();
480 set_def_peer_eth_addrs();
481 set_default_fwd_ports_config();
485 * Configuration initialisation done once at init time.
488 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
489 unsigned int socket_id)
491 char pool_name[RTE_MEMPOOL_NAMESIZE];
492 struct rte_mempool *rte_mp = NULL;
495 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
496 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
499 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
500 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
503 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
504 mb_size, (unsigned) mb_mempool_cache,
505 sizeof(struct rte_pktmbuf_pool_private),
510 if (rte_mempool_populate_anon(rte_mp) == 0) {
511 rte_mempool_free(rte_mp);
515 rte_pktmbuf_pool_init(rte_mp, NULL);
516 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
518 /* wrapper to rte_mempool_create() */
519 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
520 rte_mbuf_best_mempool_ops());
521 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
522 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
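		/*
		 * rte_pktmbuf_pool_create() bundles the create/populate/init
		 * steps of the anonymous-memory branch above into a single
		 * call; the log line above records which mempool ops are
		 * expected to back the pool.
		 */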
526 if (rte_mp == NULL) {
527 rte_exit(EXIT_FAILURE,
528 "Creation of mbuf pool for socket %u failed: %s\n",
529 socket_id, rte_strerror(rte_errno));
530 } else if (verbose_level > 0) {
531 rte_mempool_dump(stdout, rte_mp);
536 * Check whether the given socket id is valid in NUMA mode.
537 * Return 0 if valid, -1 otherwise.
540 check_socket_id(const unsigned int socket_id)
542 static int warning_once = 0;
544 if (new_socket_id(socket_id)) {
545 if (!warning_once && numa_support)
546 printf("Warning: NUMA should be configured manually by"
547 " using --port-numa-config and"
548 " --ring-numa-config parameters along with"
557 * Get the allowed maximum number of RX queues.
558 * *pid returns the port id that has the minimal value of
559 * max_rx_queues among all ports.
562 get_allowed_max_nb_rxq(portid_t *pid)
564 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
566 struct rte_eth_dev_info dev_info;
568 RTE_ETH_FOREACH_DEV(pi) {
569 rte_eth_dev_info_get(pi, &dev_info);
570 if (dev_info.max_rx_queues < allowed_max_rxq) {
571 allowed_max_rxq = dev_info.max_rx_queues;
575 return allowed_max_rxq;
579 * Check whether the input rxq is valid.
580 * It is valid if it does not exceed the maximum number
581 * of RX queues of any port.
582 * Return 0 if valid, -1 otherwise.
585 check_nb_rxq(queueid_t rxq)
587 queueid_t allowed_max_rxq;
590 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
591 if (rxq > allowed_max_rxq) {
592 printf("Fail: input rxq (%u) can't be greater "
593 "than max_rx_queues (%u) of port %u\n",
603 * Get the allowed maximum number of TX queues.
604 * *pid returns the port id that has the minimal value of
605 * max_tx_queues among all ports.
608 get_allowed_max_nb_txq(portid_t *pid)
610 queueid_t allowed_max_txq = MAX_QUEUE_ID;
612 struct rte_eth_dev_info dev_info;
614 RTE_ETH_FOREACH_DEV(pi) {
615 rte_eth_dev_info_get(pi, &dev_info);
616 if (dev_info.max_tx_queues < allowed_max_txq) {
617 allowed_max_txq = dev_info.max_tx_queues;
621 return allowed_max_txq;
625 * Check whether the input txq is valid.
626 * It is valid if it does not exceed the maximum number
627 * of TX queues of any port.
628 * Return 0 if valid, -1 otherwise.
631 check_nb_txq(queueid_t txq)
633 queueid_t allowed_max_txq;
636 allowed_max_txq = get_allowed_max_nb_txq(&pid);
637 if (txq > allowed_max_txq) {
638 printf("Fail: input txq (%u) can't be greater "
639 "than max_tx_queues (%u) of port %u\n",
652 struct rte_port *port;
653 struct rte_mempool *mbp;
654 unsigned int nb_mbuf_per_pool;
656 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
657 struct rte_gro_param gro_param;
660 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
663 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
664 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
665 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
668 /* Configuration of logical cores. */
669 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
670 sizeof(struct fwd_lcore *) * nb_lcores,
671 RTE_CACHE_LINE_SIZE);
672 if (fwd_lcores == NULL) {
673 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
674 "failed\n", nb_lcores);
676 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
677 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
678 sizeof(struct fwd_lcore),
679 RTE_CACHE_LINE_SIZE);
680 if (fwd_lcores[lc_id] == NULL) {
681 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
684 fwd_lcores[lc_id]->cpuid_idx = lc_id;
687 RTE_ETH_FOREACH_DEV(pid) {
689 /* Apply default TxRx configuration for all ports */
690 port->dev_conf.txmode = tx_mode;
691 port->dev_conf.rxmode = rx_mode;
692 rte_eth_dev_info_get(pid, &port->dev_info);
693 if (!(port->dev_info.tx_offload_capa &
694 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
695 port->dev_conf.txmode.offloads &=
696 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
697 if (!(port->dev_info.rx_offload_capa &
698 DEV_RX_OFFLOAD_CRC_STRIP))
699 port->dev_conf.rxmode.offloads &=
700 ~DEV_RX_OFFLOAD_CRC_STRIP;
702 if (port_numa[pid] != NUMA_NO_CONFIG)
703 port_per_socket[port_numa[pid]]++;
705 uint32_t socket_id = rte_eth_dev_socket_id(pid);
707 /* if socket_id is invalid, set to 0 */
708 if (check_socket_id(socket_id) < 0)
710 port_per_socket[socket_id]++;
714 /* set flag to initialize port/queue */
715 port->need_reconfig = 1;
716 port->need_reconfig_queues = 1;
720 * Create pools of mbufs.
721 * If NUMA support is disabled, create a single pool of mbufs in
722 * socket 0 memory by default.
723 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
725 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
726 * nb_txd can be re-configured at run time.
728 if (param_total_num_mbufs)
729 nb_mbuf_per_pool = param_total_num_mbufs;
731 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
732 (nb_lcores * mb_mempool_cache) +
733 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
734 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
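	/*
	 * Rough sizing sketch for the default (no --total-num-mbufs) case:
	 * each pool must be able to hold a full RX ring, a full TX ring,
	 * one burst in flight and one mempool cache per lcore, i.e.
	 *
	 *   nb_mbuf_per_pool = (RTE_TEST_RX_DESC_MAX + RTE_TEST_TX_DESC_MAX
	 *                       + nb_lcores * mb_mempool_cache + MAX_PKT_BURST)
	 *                      * RTE_MAX_ETHPORTS
	 */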
740 for (i = 0; i < num_sockets; i++)
741 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
744 if (socket_num == UMA_NO_CONFIG)
745 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
747 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
753 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
754 DEV_TX_OFFLOAD_GRE_TNL_TSO;
756 * Record which mbuf pool each logical core should use, if needed.
758 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
759 mbp = mbuf_pool_find(
760 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
763 mbp = mbuf_pool_find(0);
764 fwd_lcores[lc_id]->mbp = mbp;
765 /* initialize GSO context */
766 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
767 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
768 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
769 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
771 fwd_lcores[lc_id]->gso_ctx.flag = 0;
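		/*
		 * The GSO context of each lcore reuses the lcore's mbuf pool
		 * for both direct and indirect segments; the segment size is
		 * derived from ETHER_MAX_LEN so that the generated segments
		 * fit in a standard Ethernet frame.
		 */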
774 /* Configuration of packet forwarding streams. */
775 if (init_fwd_streams() < 0)
776 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
780 /* create a gro context for each lcore */
781 gro_param.gro_types = RTE_GRO_TCP_IPV4;
782 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
783 gro_param.max_item_per_flow = MAX_PKT_BURST;
784 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
785 gro_param.socket_id = rte_lcore_to_socket_id(
786 fwd_lcores_cpuids[lc_id]);
787 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
788 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
789 rte_exit(EXIT_FAILURE,
790 "rte_gro_ctx_create() failed\n");
797 reconfig(portid_t new_port_id, unsigned socket_id)
799 struct rte_port *port;
801 /* Reconfiguration of Ethernet ports. */
802 port = &ports[new_port_id];
803 rte_eth_dev_info_get(new_port_id, &port->dev_info);
805 /* set flag to initialize port/queue */
806 port->need_reconfig = 1;
807 port->need_reconfig_queues = 1;
808 port->socket_id = socket_id;
815 init_fwd_streams(void)
818 struct rte_port *port;
819 streamid_t sm_id, nb_fwd_streams_new;
822 /* set the socket id according to whether NUMA is enabled */
823 RTE_ETH_FOREACH_DEV(pid) {
825 if (nb_rxq > port->dev_info.max_rx_queues) {
826 printf("Fail: nb_rxq(%d) is greater than "
827 "max_rx_queues(%d)\n", nb_rxq,
828 port->dev_info.max_rx_queues);
831 if (nb_txq > port->dev_info.max_tx_queues) {
832 printf("Fail: nb_txq(%d) is greater than "
833 "max_tx_queues(%d)\n", nb_txq,
834 port->dev_info.max_tx_queues);
838 if (port_numa[pid] != NUMA_NO_CONFIG)
839 port->socket_id = port_numa[pid];
841 port->socket_id = rte_eth_dev_socket_id(pid);
843 /* if socket_id is invalid, set to 0 */
844 if (check_socket_id(port->socket_id) < 0)
849 if (socket_num == UMA_NO_CONFIG)
852 port->socket_id = socket_num;
856 q = RTE_MAX(nb_rxq, nb_txq);
858 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
861 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
862 if (nb_fwd_streams_new == nb_fwd_streams)
865 if (fwd_streams != NULL) {
866 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
867 if (fwd_streams[sm_id] == NULL)
869 rte_free(fwd_streams[sm_id]);
870 fwd_streams[sm_id] = NULL;
872 rte_free(fwd_streams);
877 nb_fwd_streams = nb_fwd_streams_new;
878 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
879 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
880 if (fwd_streams == NULL)
881 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
882 "failed\n", nb_fwd_streams);
884 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
885 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
886 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
887 if (fwd_streams[sm_id] == NULL)
888 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
895 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
897 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
899 unsigned int total_burst;
900 unsigned int nb_burst;
901 unsigned int burst_stats[3];
902 uint16_t pktnb_stats[3];
904 int burst_percent[3];
907 * First compute the total number of packet bursts and the
908 * two highest numbers of bursts of the same number of packets.
911 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
912 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
913 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
914 nb_burst = pbs->pkt_burst_spread[nb_pkt];
917 total_burst += nb_burst;
918 if (nb_burst > burst_stats[0]) {
919 burst_stats[1] = burst_stats[0];
920 pktnb_stats[1] = pktnb_stats[0];
921 burst_stats[0] = nb_burst;
922 pktnb_stats[0] = nb_pkt;
925 if (total_burst == 0)
927 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
928 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
929 burst_percent[0], (int) pktnb_stats[0]);
930 if (burst_stats[0] == total_burst) {
934 if (burst_stats[0] + burst_stats[1] == total_burst) {
935 printf(" + %d%% of %d pkts]\n",
936 100 - burst_percent[0], pktnb_stats[1]);
939 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
940 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
941 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
942 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
945 printf(" + %d%% of %d pkts + %d%% of others]\n",
946 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
948 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
951 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
953 struct rte_port *port;
956 static const char *fwd_stats_border = "----------------------";
958 port = &ports[port_id];
959 printf("\n %s Forward statistics for port %-2d %s\n",
960 fwd_stats_border, port_id, fwd_stats_border);
962 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
963 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
965 stats->ipackets, stats->imissed,
966 (uint64_t) (stats->ipackets + stats->imissed));
968 if (cur_fwd_eng == &csum_fwd_engine)
969 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
970 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
971 if ((stats->ierrors + stats->rx_nombuf) > 0) {
972 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
973 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
976 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
978 stats->opackets, port->tx_dropped,
979 (uint64_t) (stats->opackets + port->tx_dropped));
982 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
984 stats->ipackets, stats->imissed,
985 (uint64_t) (stats->ipackets + stats->imissed));
987 if (cur_fwd_eng == &csum_fwd_engine)
988 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
989 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
990 if ((stats->ierrors + stats->rx_nombuf) > 0) {
991 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
992 printf(" RX-nombufs: %14"PRIu64"\n",
996 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
998 stats->opackets, port->tx_dropped,
999 (uint64_t) (stats->opackets + port->tx_dropped));
1002 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1003 if (port->rx_stream)
1004 pkt_burst_stats_display("RX",
1005 &port->rx_stream->rx_burst_stats);
1006 if (port->tx_stream)
1007 pkt_burst_stats_display("TX",
1008 &port->tx_stream->tx_burst_stats);
1011 if (port->rx_queue_stats_mapping_enabled) {
1013 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1014 printf(" Stats reg %2d RX-packets:%14"PRIu64
1015 " RX-errors:%14"PRIu64
1016 " RX-bytes:%14"PRIu64"\n",
1017 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1021 if (port->tx_queue_stats_mapping_enabled) {
1022 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1023 printf(" Stats reg %2d TX-packets:%14"PRIu64
1024 " TX-bytes:%14"PRIu64"\n",
1025 i, stats->q_opackets[i], stats->q_obytes[i]);
1029 printf(" %s--------------------------------%s\n",
1030 fwd_stats_border, fwd_stats_border);
1034 fwd_stream_stats_display(streamid_t stream_id)
1036 struct fwd_stream *fs;
1037 static const char *fwd_top_stats_border = "-------";
1039 fs = fwd_streams[stream_id];
1040 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1041 (fs->fwd_dropped == 0))
1043 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1044 "TX Port=%2d/Queue=%2d %s\n",
1045 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1046 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1047 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1048 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1050 /* if checksum mode */
1051 if (cur_fwd_eng == &csum_fwd_engine) {
1052 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
1053 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1056 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1057 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1058 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1063 flush_fwd_rx_queues(void)
1065 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1072 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1073 uint64_t timer_period;
1075 /* convert to number of cycles */
1076 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1078 for (j = 0; j < 2; j++) {
1079 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1080 for (rxq = 0; rxq < nb_rxq; rxq++) {
1081 port_id = fwd_ports_ids[rxp];
1083 * testpmd can get stuck in the do-while loop below
1084 * if rte_eth_rx_burst() always returns a nonzero number of
1085 * packets. So a timer is added to exit this loop
1086 * after the 1-second timer expires.
1088 prev_tsc = rte_rdtsc();
1090 nb_rx = rte_eth_rx_burst(port_id, rxq,
1091 pkts_burst, MAX_PKT_BURST);
1092 for (i = 0; i < nb_rx; i++)
1093 rte_pktmbuf_free(pkts_burst[i]);
1095 cur_tsc = rte_rdtsc();
1096 diff_tsc = cur_tsc - prev_tsc;
1097 timer_tsc += diff_tsc;
1098 } while ((nb_rx > 0) &&
1099 (timer_tsc < timer_period));
1103 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1108 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1110 struct fwd_stream **fsm;
1113 #ifdef RTE_LIBRTE_BITRATE
1114 uint64_t tics_per_1sec;
1115 uint64_t tics_datum;
1116 uint64_t tics_current;
1117 uint8_t idx_port, cnt_ports;
1119 cnt_ports = rte_eth_dev_count();
1120 tics_datum = rte_rdtsc();
1121 tics_per_1sec = rte_get_timer_hz();
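	/*
	 * The forwarding loop below calls the packet_fwd handler of every
	 * stream owned by this lcore and, when enabled, recomputes bitrate
	 * statistics roughly once per second (tics_per_1sec) on the lcore
	 * selected by bitrate_lcore_id.
	 */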
1123 fsm = &fwd_streams[fc->stream_idx];
1124 nb_fs = fc->stream_nb;
1126 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1127 (*pkt_fwd)(fsm[sm_id]);
1128 #ifdef RTE_LIBRTE_BITRATE
1129 if (bitrate_enabled != 0 &&
1130 bitrate_lcore_id == rte_lcore_id()) {
1131 tics_current = rte_rdtsc();
1132 if (tics_current - tics_datum >= tics_per_1sec) {
1133 /* Periodic bitrate calculation */
1135 idx_port < cnt_ports;
1137 rte_stats_bitrate_calc(bitrate_data,
1139 tics_datum = tics_current;
1143 #ifdef RTE_LIBRTE_LATENCY_STATS
1144 if (latencystats_enabled != 0 &&
1145 latencystats_lcore_id == rte_lcore_id())
1146 rte_latencystats_update();
1149 } while (! fc->stopped);
1153 start_pkt_forward_on_core(void *fwd_arg)
1155 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1156 cur_fwd_config.fwd_eng->packet_fwd);
1161 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1162 * Used to start communication flows in network loopback test configurations.
1165 run_one_txonly_burst_on_core(void *fwd_arg)
1167 struct fwd_lcore *fwd_lc;
1168 struct fwd_lcore tmp_lcore;
1170 fwd_lc = (struct fwd_lcore *) fwd_arg;
1171 tmp_lcore = *fwd_lc;
1172 tmp_lcore.stopped = 1;
1173 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
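	/*
	 * The copied lcore context has stopped = 1, so the do-while loop in
	 * run_pkt_fwd_on_lcore() exits after a single pass: one TXONLY burst
	 * is sent per stream handled by this lcore.
	 */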
1178 * Launch packet forwarding:
1179 * - Setup per-port forwarding context.
1180 * - launch logical cores with their forwarding configuration.
1183 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1185 port_fwd_begin_t port_fwd_begin;
1190 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1191 if (port_fwd_begin != NULL) {
1192 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1193 (*port_fwd_begin)(fwd_ports_ids[i]);
1195 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1196 lc_id = fwd_lcores_cpuids[i];
1197 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1198 fwd_lcores[i]->stopped = 0;
1199 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1200 fwd_lcores[i], lc_id);
1202 printf("launch lcore %u failed - diag=%d\n",
1209 * Launch packet forwarding configuration.
1212 start_packet_forwarding(int with_tx_first)
1214 port_fwd_begin_t port_fwd_begin;
1215 port_fwd_end_t port_fwd_end;
1216 struct rte_port *port;
1221 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1222 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1224 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1225 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1227 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1228 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1229 (!nb_rxq || !nb_txq))
1230 rte_exit(EXIT_FAILURE,
1231 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1232 cur_fwd_eng->fwd_mode_name);
1234 if (all_ports_started() == 0) {
1235 printf("Not all ports were started\n");
1238 if (test_done == 0) {
1239 printf("Packet forwarding already started\n");
1243 if (init_fwd_streams() < 0) {
1244 printf("Fail from init_fwd_streams()\n");
1249 for (i = 0; i < nb_fwd_ports; i++) {
1250 pt_id = fwd_ports_ids[i];
1251 port = &ports[pt_id];
1252 if (!port->dcb_flag) {
1253 printf("In DCB mode, all forwarding ports must "
1254 "be configured in this mode.\n");
1258 if (nb_fwd_lcores == 1) {
1259 printf("In DCB mode, the number of forwarding cores "
1260 "should be larger than 1.\n");
1267 flush_fwd_rx_queues();
1270 pkt_fwd_config_display(&cur_fwd_config);
1271 rxtx_config_display();
1273 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1274 pt_id = fwd_ports_ids[i];
1275 port = &ports[pt_id];
1276 rte_eth_stats_get(pt_id, &port->stats);
1277 port->tx_dropped = 0;
1279 map_port_queue_stats_mapping_registers(pt_id, port);
1281 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1282 fwd_streams[sm_id]->rx_packets = 0;
1283 fwd_streams[sm_id]->tx_packets = 0;
1284 fwd_streams[sm_id]->fwd_dropped = 0;
1285 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1286 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1288 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1289 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1290 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1291 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1292 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1294 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1295 fwd_streams[sm_id]->core_cycles = 0;
1298 if (with_tx_first) {
1299 port_fwd_begin = tx_only_engine.port_fwd_begin;
1300 if (port_fwd_begin != NULL) {
1301 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1302 (*port_fwd_begin)(fwd_ports_ids[i]);
1304 while (with_tx_first--) {
1305 launch_packet_forwarding(
1306 run_one_txonly_burst_on_core);
1307 rte_eal_mp_wait_lcore();
1309 port_fwd_end = tx_only_engine.port_fwd_end;
1310 if (port_fwd_end != NULL) {
1311 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1312 (*port_fwd_end)(fwd_ports_ids[i]);
1315 launch_packet_forwarding(start_pkt_forward_on_core);
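	/*
	 * Note on --tx-first: the block above runs the TXONLY engine
	 * 'with_tx_first' times (one burst per stream each time) to prime
	 * loopback flows, waits for all lcores, and only then launches the
	 * forwarding engine that was actually configured.
	 */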
1319 stop_packet_forwarding(void)
1321 struct rte_eth_stats stats;
1322 struct rte_port *port;
1323 port_fwd_end_t port_fwd_end;
1328 uint64_t total_recv;
1329 uint64_t total_xmit;
1330 uint64_t total_rx_dropped;
1331 uint64_t total_tx_dropped;
1332 uint64_t total_rx_nombuf;
1333 uint64_t tx_dropped;
1334 uint64_t rx_bad_ip_csum;
1335 uint64_t rx_bad_l4_csum;
1336 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1337 uint64_t fwd_cycles;
1340 static const char *acc_stats_border = "+++++++++++++++";
1343 printf("Packet forwarding not started\n");
1346 printf("Telling cores to stop...");
1347 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1348 fwd_lcores[lc_id]->stopped = 1;
1349 printf("\nWaiting for lcores to finish...\n");
1350 rte_eal_mp_wait_lcore();
1351 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1352 if (port_fwd_end != NULL) {
1353 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1354 pt_id = fwd_ports_ids[i];
1355 (*port_fwd_end)(pt_id);
1358 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1361 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1362 if (cur_fwd_config.nb_fwd_streams >
1363 cur_fwd_config.nb_fwd_ports) {
1364 fwd_stream_stats_display(sm_id);
1365 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1366 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1368 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1370 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1373 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1374 tx_dropped = (uint64_t) (tx_dropped +
1375 fwd_streams[sm_id]->fwd_dropped);
1376 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1379 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1380 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1381 fwd_streams[sm_id]->rx_bad_ip_csum);
1382 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1386 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1387 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1388 fwd_streams[sm_id]->rx_bad_l4_csum);
1389 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1392 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1393 fwd_cycles = (uint64_t) (fwd_cycles +
1394 fwd_streams[sm_id]->core_cycles);
1399 total_rx_dropped = 0;
1400 total_tx_dropped = 0;
1401 total_rx_nombuf = 0;
1402 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1403 pt_id = fwd_ports_ids[i];
1405 port = &ports[pt_id];
1406 rte_eth_stats_get(pt_id, &stats);
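		/*
		 * The statistics snapshot taken in start_packet_forwarding()
		 * (port->stats) is subtracted below, so the counters shown by
		 * fwd_port_stats_display() only cover this forwarding session.
		 */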
1407 stats.ipackets -= port->stats.ipackets;
1408 port->stats.ipackets = 0;
1409 stats.opackets -= port->stats.opackets;
1410 port->stats.opackets = 0;
1411 stats.ibytes -= port->stats.ibytes;
1412 port->stats.ibytes = 0;
1413 stats.obytes -= port->stats.obytes;
1414 port->stats.obytes = 0;
1415 stats.imissed -= port->stats.imissed;
1416 port->stats.imissed = 0;
1417 stats.oerrors -= port->stats.oerrors;
1418 port->stats.oerrors = 0;
1419 stats.rx_nombuf -= port->stats.rx_nombuf;
1420 port->stats.rx_nombuf = 0;
1422 total_recv += stats.ipackets;
1423 total_xmit += stats.opackets;
1424 total_rx_dropped += stats.imissed;
1425 total_tx_dropped += port->tx_dropped;
1426 total_rx_nombuf += stats.rx_nombuf;
1428 fwd_port_stats_display(pt_id, &stats);
1431 printf("\n %s Accumulated forward statistics for all ports"
1433 acc_stats_border, acc_stats_border);
1434 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1436 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1438 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1439 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1440 if (total_rx_nombuf > 0)
1441 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1442 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1444 acc_stats_border, acc_stats_border);
1445 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1447 printf("\n CPU cycles/packet=%u (total cycles="
1448 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1449 (unsigned int)(fwd_cycles / total_recv),
1450 fwd_cycles, total_recv);
1452 printf("\nDone.\n");
1457 dev_set_link_up(portid_t pid)
1459 if (rte_eth_dev_set_link_up(pid) < 0)
1460 printf("\nSet link up fail.\n");
1464 dev_set_link_down(portid_t pid)
1466 if (rte_eth_dev_set_link_down(pid) < 0)
1467 printf("\nSet link down fail.\n");
1471 all_ports_started(void)
1474 struct rte_port *port;
1476 RTE_ETH_FOREACH_DEV(pi) {
1478 /* Check if there is a port which is not started */
1479 if ((port->port_status != RTE_PORT_STARTED) &&
1480 (port->slave_flag == 0))
1484 /* No port is not started */
1489 port_is_stopped(portid_t port_id)
1491 struct rte_port *port = &ports[port_id];
1493 if ((port->port_status != RTE_PORT_STOPPED) &&
1494 (port->slave_flag == 0))
1500 all_ports_stopped(void)
1504 RTE_ETH_FOREACH_DEV(pi) {
1505 if (!port_is_stopped(pi))
1513 port_is_started(portid_t port_id)
1515 if (port_id_is_invalid(port_id, ENABLED_WARN))
1518 if (ports[port_id].port_status != RTE_PORT_STARTED)
1525 port_is_closed(portid_t port_id)
1527 if (port_id_is_invalid(port_id, ENABLED_WARN))
1530 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1537 start_port(portid_t pid)
1539 int diag, need_check_link_status = -1;
1542 struct rte_port *port;
1543 struct ether_addr mac_addr;
1544 enum rte_eth_event_type event_type;
1546 if (port_id_is_invalid(pid, ENABLED_WARN))
1551 RTE_ETH_FOREACH_DEV(pi) {
1552 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1555 need_check_link_status = 0;
1557 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1558 RTE_PORT_HANDLING) == 0) {
1559 printf("Port %d is now not stopped\n", pi);
1563 if (port->need_reconfig > 0) {
1564 port->need_reconfig = 0;
1566 if (flow_isolate_all) {
1567 int ret = port_flow_isolate(pi, 1);
1569 printf("Failed to apply isolated"
1570 " mode on port %d\n", pi);
1575 printf("Configuring Port %d (socket %u)\n", pi,
1577 /* configure port */
1578 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1581 if (rte_atomic16_cmpset(&(port->port_status),
1582 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1583 printf("Port %d can not be set back "
1584 "to stopped\n", pi);
1585 printf("Fail to configure port %d\n", pi);
1586 /* try to reconfigure port next time */
1587 port->need_reconfig = 1;
1591 if (port->need_reconfig_queues > 0) {
1592 port->need_reconfig_queues = 0;
1593 port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
1594 /* Apply Tx offloads configuration */
1595 port->tx_conf.offloads = port->dev_conf.txmode.offloads;
1596 /* setup tx queues */
1597 for (qi = 0; qi < nb_txq; qi++) {
1598 if ((numa_support) &&
1599 (txring_numa[pi] != NUMA_NO_CONFIG))
1600 diag = rte_eth_tx_queue_setup(pi, qi,
1601 nb_txd, txring_numa[pi],
1604 diag = rte_eth_tx_queue_setup(pi, qi,
1605 nb_txd, port->socket_id,
1611 /* Fail to setup tx queue, return */
1612 if (rte_atomic16_cmpset(&(port->port_status),
1614 RTE_PORT_STOPPED) == 0)
1615 printf("Port %d can not be set back "
1616 "to stopped\n", pi);
1617 printf("Fail to configure port %d tx queues\n", pi);
1618 /* try to reconfigure queues next time */
1619 port->need_reconfig_queues = 1;
1622 /* Apply Rx offloads configuration */
1623 port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1624 /* setup rx queues */
1625 for (qi = 0; qi < nb_rxq; qi++) {
1626 if ((numa_support) &&
1627 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1628 struct rte_mempool * mp =
1629 mbuf_pool_find(rxring_numa[pi]);
1631 printf("Failed to setup RX queue: "
1632 "No mempool allocation"
1633 " on the socket %d\n",
1638 diag = rte_eth_rx_queue_setup(pi, qi,
1639 nb_rxd, rxring_numa[pi],
1640 &(port->rx_conf), mp);
1642 struct rte_mempool *mp =
1643 mbuf_pool_find(port->socket_id);
1645 printf("Failed to setup RX queue: "
1646 "No mempool allocation"
1647 " on the socket %d\n",
1651 diag = rte_eth_rx_queue_setup(pi, qi,
1652 nb_rxd, port->socket_id,
1653 &(port->rx_conf), mp);
1658 /* Fail to setup rx queue, return */
1659 if (rte_atomic16_cmpset(&(port->port_status),
1661 RTE_PORT_STOPPED) == 0)
1662 printf("Port %d can not be set back "
1663 "to stopped\n", pi);
1664 printf("Fail to configure port %d rx queues\n", pi);
1665 /* try to reconfigure queues next time */
1666 port->need_reconfig_queues = 1;
1672 if (rte_eth_dev_start(pi) < 0) {
1673 printf("Fail to start port %d\n", pi);
1675 /* Fail to setup rx queue, return */
1676 if (rte_atomic16_cmpset(&(port->port_status),
1677 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1678 printf("Port %d can not be set back to "
1683 if (rte_atomic16_cmpset(&(port->port_status),
1684 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1685 printf("Port %d can not be set into started\n", pi);
1687 rte_eth_macaddr_get(pi, &mac_addr);
1688 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1689 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1690 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1691 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1693 /* at least one port started, need to check link status */
1694 need_check_link_status = 1;
1697 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1698 event_type < RTE_ETH_EVENT_MAX;
1700 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1705 printf("Failed to setup event callback for event %d\n",
1711 if (need_check_link_status == 1 && !no_link_check)
1712 check_all_ports_link_status(RTE_PORT_ALL);
1713 else if (need_check_link_status == 0)
1714 printf("Please stop the ports first\n");
1721 stop_port(portid_t pid)
1724 struct rte_port *port;
1725 int need_check_link_status = 0;
1732 if (port_id_is_invalid(pid, ENABLED_WARN))
1735 printf("Stopping ports...\n");
1737 RTE_ETH_FOREACH_DEV(pi) {
1738 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1741 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1742 printf("Please remove port %d from forwarding configuration.\n", pi);
1746 if (port_is_bonding_slave(pi)) {
1747 printf("Please remove port %d from bonded device.\n", pi);
1752 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1753 RTE_PORT_HANDLING) == 0)
1756 rte_eth_dev_stop(pi);
1758 if (rte_atomic16_cmpset(&(port->port_status),
1759 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1760 printf("Port %d can not be set into stopped\n", pi);
1761 need_check_link_status = 1;
1763 if (need_check_link_status && !no_link_check)
1764 check_all_ports_link_status(RTE_PORT_ALL);
1770 close_port(portid_t pid)
1773 struct rte_port *port;
1775 if (port_id_is_invalid(pid, ENABLED_WARN))
1778 printf("Closing ports...\n");
1780 RTE_ETH_FOREACH_DEV(pi) {
1781 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1784 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1785 printf("Please remove port %d from forwarding configuration.\n", pi);
1789 if (port_is_bonding_slave(pi)) {
1790 printf("Please remove port %d from bonded device.\n", pi);
1795 if (rte_atomic16_cmpset(&(port->port_status),
1796 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1797 printf("Port %d is already closed\n", pi);
1801 if (rte_atomic16_cmpset(&(port->port_status),
1802 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1803 printf("Port %d is now not stopped\n", pi);
1807 if (port->flow_list)
1808 port_flow_flush(pi);
1809 rte_eth_dev_close(pi);
1811 if (rte_atomic16_cmpset(&(port->port_status),
1812 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1813 printf("Port %d cannot be set to closed\n", pi);
1820 reset_port(portid_t pid)
1824 struct rte_port *port;
1826 if (port_id_is_invalid(pid, ENABLED_WARN))
1829 printf("Resetting ports...\n");
1831 RTE_ETH_FOREACH_DEV(pi) {
1832 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1835 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1836 printf("Please remove port %d from forwarding "
1837 "configuration.\n", pi);
1841 if (port_is_bonding_slave(pi)) {
1842 printf("Please remove port %d from bonded device.\n",
1847 diag = rte_eth_dev_reset(pi);
1850 port->need_reconfig = 1;
1851 port->need_reconfig_queues = 1;
1853 printf("Failed to reset port %d. diag=%d\n", pi, diag);
1861 attach_port(char *identifier)
1864 unsigned int socket_id;
1866 printf("Attaching a new port...\n");
1868 if (identifier == NULL) {
1869 printf("Invalid parameters are specified\n");
1873 if (rte_eth_dev_attach(identifier, &pi))
1876 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1877 /* if socket_id is invalid, set to 0 */
1878 if (check_socket_id(socket_id) < 0)
1880 reconfig(pi, socket_id);
1881 rte_eth_promiscuous_enable(pi);
1883 nb_ports = rte_eth_dev_count();
1885 ports[pi].port_status = RTE_PORT_STOPPED;
1887 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1892 detach_port(portid_t port_id)
1894 char name[RTE_ETH_NAME_MAX_LEN];
1896 printf("Detaching a port...\n");
1898 if (!port_is_closed(port_id)) {
1899 printf("Please close port first\n");
1903 if (ports[port_id].flow_list)
1904 port_flow_flush(port_id);
1906 if (rte_eth_dev_detach(port_id, name)) {
1907 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1911 nb_ports = rte_eth_dev_count();
1913 printf("Port '%s' is detached. Now total ports is %d\n",
1925 stop_packet_forwarding();
1927 if (ports != NULL) {
1929 RTE_ETH_FOREACH_DEV(pt_id) {
1930 printf("\nShutting down port %d...\n", pt_id);
1936 printf("\nBye...\n");
1939 typedef void (*cmd_func_t)(void);
1940 struct pmd_test_command {
1941 const char *cmd_name;
1942 cmd_func_t cmd_func;
1945 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1947 /* Check the link status of all ports in up to 9 s, and finally print the status of each */
1949 check_all_ports_link_status(uint32_t port_mask)
1951 #define CHECK_INTERVAL 100 /* 100ms */
1952 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
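/* Worst-case wait: MAX_CHECK_TIME * CHECK_INTERVAL = 90 * 100 ms = 9 s. */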
1954 uint8_t count, all_ports_up, print_flag = 0;
1955 struct rte_eth_link link;
1957 printf("Checking link statuses...\n");
1959 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1961 RTE_ETH_FOREACH_DEV(portid) {
1962 if ((port_mask & (1 << portid)) == 0)
1964 memset(&link, 0, sizeof(link));
1965 rte_eth_link_get_nowait(portid, &link);
1966 /* print link status if flag set */
1967 if (print_flag == 1) {
1968 if (link.link_status)
1970 "Port%d Link Up. speed %u Mbps- %s\n",
1971 portid, link.link_speed,
1972 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1973 ("full-duplex") : ("half-duplex\n"));
1975 printf("Port %d Link Down\n", portid);
1978 /* clear all_ports_up flag if any link down */
1979 if (link.link_status == ETH_LINK_DOWN) {
1984 /* after finally printing all link status, get out */
1985 if (print_flag == 1)
1988 if (all_ports_up == 0) {
1990 rte_delay_ms(CHECK_INTERVAL);
1993 /* set the print_flag if all ports up or timeout */
1994 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2004 rmv_event_callback(void *arg)
2006 struct rte_eth_dev *dev;
2007 portid_t port_id = (intptr_t)arg;
2009 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2010 dev = &rte_eth_devices[port_id];
2013 close_port(port_id);
2014 printf("removing device %s\n", dev->device->name);
2015 if (rte_eal_dev_detach(dev->device))
2016 TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2020 /* This function is used by the interrupt thread */
2022 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2025 static const char * const event_desc[] = {
2026 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2027 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2028 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2029 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2030 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2031 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2032 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2033 [RTE_ETH_EVENT_NEW] = "device probed",
2034 [RTE_ETH_EVENT_DESTROY] = "device released",
2035 [RTE_ETH_EVENT_MAX] = NULL,
2038 RTE_SET_USED(param);
2039 RTE_SET_USED(ret_param);
2041 if (type >= RTE_ETH_EVENT_MAX) {
2042 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
2043 port_id, __func__, type);
2045 } else if (event_print_mask & (UINT32_C(1) << type)) {
2046 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2051 if (port_id_is_invalid(port_id, DISABLED_WARN))
2055 case RTE_ETH_EVENT_INTR_RMV:
2056 if (rte_eal_alarm_set(100000,
2057 rmv_event_callback, (void *)(intptr_t)port_id))
2058 fprintf(stderr, "Could not set up deferred device removal\n");
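		/*
		 * Device removal is handled through a deferred alarm
		 * (100000 us = 100 ms) so that rmv_event_callback() closes
		 * the port and detaches the device outside of the interrupt
		 * callback itself.
		 */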
2067 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2071 uint8_t mapping_found = 0;
2073 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2074 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2075 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2076 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2077 tx_queue_stats_mappings[i].queue_id,
2078 tx_queue_stats_mappings[i].stats_counter_id);
2085 port->tx_queue_stats_mapping_enabled = 1;
2090 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2094 uint8_t mapping_found = 0;
2096 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2097 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2098 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2099 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2100 rx_queue_stats_mappings[i].queue_id,
2101 rx_queue_stats_mappings[i].stats_counter_id);
2108 port->rx_queue_stats_mapping_enabled = 1;
2113 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2117 diag = set_tx_queue_stats_mapping_registers(pi, port);
2119 if (diag == -ENOTSUP) {
2120 port->tx_queue_stats_mapping_enabled = 0;
2121 printf("TX queue stats mapping not supported port id=%d\n", pi);
2124 rte_exit(EXIT_FAILURE,
2125 "set_tx_queue_stats_mapping_registers "
2126 "failed for port id=%d diag=%d\n",
2130 diag = set_rx_queue_stats_mapping_registers(pi, port);
2132 if (diag == -ENOTSUP) {
2133 port->rx_queue_stats_mapping_enabled = 0;
2134 printf("RX queue stats mapping not supported port id=%d\n", pi);
2137 rte_exit(EXIT_FAILURE,
2138 "set_rx_queue_stats_mapping_registers "
2139 "failed for port id=%d diag=%d\n",
2145 rxtx_port_config(struct rte_port *port)
2147 port->rx_conf = port->dev_info.default_rxconf;
2148 port->tx_conf = port->dev_info.default_txconf;
2150 /* Check if any RX/TX parameters have been passed */
2151 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2152 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2154 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2155 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2157 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2158 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2160 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2161 port->rx_conf.rx_free_thresh = rx_free_thresh;
2163 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2164 port->rx_conf.rx_drop_en = rx_drop_en;
2166 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2167 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2169 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2170 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2172 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2173 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2175 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2176 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2178 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2179 port->tx_conf.tx_free_thresh = tx_free_thresh;
2183 init_port_config(void)
2186 struct rte_port *port;
2188 RTE_ETH_FOREACH_DEV(pid) {
2190 port->dev_conf.fdir_conf = fdir_conf;
2192 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2193 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2195 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2196 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2199 if (port->dcb_flag == 0) {
2200 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2201 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2203 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2206 rxtx_port_config(port);
2208 rte_eth_macaddr_get(pid, &port->eth_addr);
2210 map_port_queue_stats_mapping_registers(pid, port);
2211 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2212 rte_pmd_ixgbe_bypass_init(pid);
2215 if (lsc_interrupt &&
2216 (rte_eth_devices[pid].data->dev_flags &
2217 RTE_ETH_DEV_INTR_LSC))
2218 port->dev_conf.intr_conf.lsc = 1;
2219 if (rmv_interrupt &&
2220 (rte_eth_devices[pid].data->dev_flags &
2221 RTE_ETH_DEV_INTR_RMV))
2222 port->dev_conf.intr_conf.rmv = 1;
2224 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2225 /* Detect softnic port */
2226 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2227 port->softnic_enable = 1;
2228 memset(&port->softport, 0, sizeof(struct softnic_port));
2230 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2231 port->softport.tm_flag = 1;
2237 void set_port_slave_flag(portid_t slave_pid)
2239 struct rte_port *port;
2241 port = &ports[slave_pid];
2242 port->slave_flag = 1;
2245 void clear_port_slave_flag(portid_t slave_pid)
2247 struct rte_port *port;
2249 port = &ports[slave_pid];
2250 port->slave_flag = 0;
2253 uint8_t port_is_bonding_slave(portid_t slave_pid)
2255 struct rte_port *port;
2257 port = &ports[slave_pid];
2258 return port->slave_flag;
2261 const uint16_t vlan_tags[] = {
2262 0, 1, 2, 3, 4, 5, 6, 7,
2263 8, 9, 10, 11, 12, 13, 14, 15,
2264 16, 17, 18, 19, 20, 21, 22, 23,
2265 24, 25, 26, 27, 28, 29, 30, 31
2269 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2270 enum dcb_mode_enable dcb_mode,
2271 enum rte_eth_nb_tcs num_tcs,
2277 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2278 * given above, and the number of traffic classes available for use.
2280 if (dcb_mode == DCB_VT_ENABLED) {
2281 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2282 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2283 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2284 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2286 /* VMDQ+DCB RX and TX configurations */
2287 vmdq_rx_conf->enable_default_pool = 0;
2288 vmdq_rx_conf->default_pool = 0;
2289 vmdq_rx_conf->nb_queue_pools =
2290 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2291 vmdq_tx_conf->nb_queue_pools =
2292 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2294 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2295 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2296 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2297 vmdq_rx_conf->pool_map[i].pools =
2298 1 << (i % vmdq_rx_conf->nb_queue_pools);
2300 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2301 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2302 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2305 /* set DCB mode of RX and TX of multiple queues */
2306 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2307 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
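		/*
		 * Summary of the DCB+VT case: the number of VMDQ pools follows
		 * the number of TCs (32 pools for 4 TCs, otherwise 16 pools),
		 * each entry of vlan_tags[] is mapped to pool (i % nb_queue_pools),
		 * and every user priority is mapped to TC (i % num_tcs) for both
		 * RX and TX.
		 */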
2309 struct rte_eth_dcb_rx_conf *rx_conf =
2310 ð_conf->rx_adv_conf.dcb_rx_conf;
2311 struct rte_eth_dcb_tx_conf *tx_conf =
2312 ð_conf->tx_adv_conf.dcb_tx_conf;
2314 rx_conf->nb_tcs = num_tcs;
2315 tx_conf->nb_tcs = num_tcs;
2317 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2318 rx_conf->dcb_tc[i] = i % num_tcs;
2319 tx_conf->dcb_tc[i] = i % num_tcs;
2321 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2322 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2323 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2327 eth_conf->dcb_capability_en =
2328 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2330 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2336 init_port_dcb_config(portid_t pid,
2337 enum dcb_mode_enable dcb_mode,
2338 enum rte_eth_nb_tcs num_tcs,
2341 struct rte_eth_conf port_conf;
2342 struct rte_port *rte_port;
2346 rte_port = &ports[pid];
2348 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2349 /* Enter DCB configuration status */
2352 port_conf.rxmode = rte_port->dev_conf.rxmode;
2353 port_conf.txmode = rte_port->dev_conf.txmode;
2355 /* set configuration of DCB in VT mode and DCB in non-VT mode */
2356 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2359 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2362 * Write the configuration into the device.
2363 * Set the numbers of RX & TX queues to 0, so
2364 * the RX & TX queues will not be set up.
2366 rte_eth_dev_configure(pid, 0, 0, &port_conf);
2368 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2370 /* If dev_info.vmdq_pool_base is greater than 0,
2371 * the queue ids of the VMDQ pools start after the PF queues.
2373 if (dcb_mode == DCB_VT_ENABLED &&
2374 rte_port->dev_info.vmdq_pool_base > 0) {
2375 printf("VMDQ_DCB multi-queue mode is nonsensical"
2376 " for port %d.", pid);
2380 /* Assume the ports in testpmd have the same DCB capability
2381 * and have the same number of rxq and txq in DCB mode
2383 if (dcb_mode == DCB_VT_ENABLED) {
2384 if (rte_port->dev_info.max_vfs > 0) {
2385 nb_rxq = rte_port->dev_info.nb_rx_queues;
2386 nb_txq = rte_port->dev_info.nb_tx_queues;
2388 nb_rxq = rte_port->dev_info.max_rx_queues;
2389 nb_txq = rte_port->dev_info.max_tx_queues;
2392 /* if VT is disabled, use all PF queues */
2393 if (rte_port->dev_info.vmdq_pool_base == 0) {
2394 nb_rxq = rte_port->dev_info.max_rx_queues;
2395 nb_txq = rte_port->dev_info.max_tx_queues;
2397 nb_rxq = (queueid_t)num_tcs;
2398 nb_txq = (queueid_t)num_tcs;
2402 rx_free_thresh = 64;
2404 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2406 rxtx_port_config(rte_port);
2408 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2409 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2410 rx_vft_set(pid, vlan_tags[i], 1);
2412 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2413 map_port_queue_stats_mapping_registers(pid, rte_port);
2415 rte_port->dcb_flag = 1;
2423 /* Configuration of Ethernet ports. */
2424 ports = rte_zmalloc("testpmd: ports",
2425 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2426 RTE_CACHE_LINE_SIZE);
2427 if (ports == NULL) {
2428 rte_exit(EXIT_FAILURE,
2429 "rte_zmalloc(%d struct rte_port) failed\n",
2445 const char clr[] = { 27, '[', '2', 'J', '\0' };
2446 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2448 /* Clear screen and move to top left */
2449 printf("%s%s", clr, top_left);
2451 printf("\nPort statistics ====================================");
2452 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2453 nic_stats_display(fwd_ports_ids[i]);
2457 signal_handler(int signum)
2459 if (signum == SIGINT || signum == SIGTERM) {
2460 printf("\nSignal %d received, preparing to exit...\n",
2462 #ifdef RTE_LIBRTE_PDUMP
2463 /* uninitialize packet capture framework */
2466 #ifdef RTE_LIBRTE_LATENCY_STATS
2467 rte_latencystats_uninit();
2470 /* Set flag to indicate forced termination. */
2472 /* exit with the expected status */
2473 signal(signum, SIG_DFL);
2474 kill(getpid(), signum);
2479 main(int argc, char** argv)
2484 signal(SIGINT, signal_handler);
2485 signal(SIGTERM, signal_handler);
2487 diag = rte_eal_init(argc, argv);
2489 rte_panic("Cannot init EAL\n");
2491 testpmd_logtype = rte_log_register("testpmd");
2492 if (testpmd_logtype < 0)
2493 rte_panic("Cannot register log type");
2494 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2496 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2497 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2501 #ifdef RTE_LIBRTE_PDUMP
2502 /* initialize packet capture framework */
2503 rte_pdump_init(NULL);
2506 nb_ports = (portid_t) rte_eth_dev_count();
2508 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2510 /* allocate port structures, and init them */
2513 set_def_fwd_config();
2515 rte_panic("Empty set of forwarding logical cores - check the "
2516 "core mask supplied in the command parameters\n");
2518 /* Bitrate/latency stats disabled by default */
2519 #ifdef RTE_LIBRTE_BITRATE
2520 bitrate_enabled = 0;
2522 #ifdef RTE_LIBRTE_LATENCY_STATS
2523 latencystats_enabled = 0;
2529 launch_args_parse(argc, argv);
2531 if (tx_first && interactive)
2532 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2533 "interactive mode.\n");
2535 if (tx_first && lsc_interrupt) {
2536 printf("Warning: lsc_interrupt needs to be off when "
2537 " using tx_first. Disabling.\n");
2541 if (!nb_rxq && !nb_txq)
2542 printf("Warning: Either rx or tx queues should be non-zero\n");
2544 if (nb_rxq > 1 && nb_rxq > nb_txq)
2545 printf("Warning: nb_rxq=%d enables RSS configuration, "
2546 "but nb_txq=%d will prevent to fully test it.\n",
2550 if (start_port(RTE_PORT_ALL) != 0)
2551 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2553 /* set all ports to promiscuous mode by default */
2554 RTE_ETH_FOREACH_DEV(port_id)
2555 rte_eth_promiscuous_enable(port_id);
2557 /* Init metrics library */
2558 rte_metrics_init(rte_socket_id());
2560 #ifdef RTE_LIBRTE_LATENCY_STATS
2561 if (latencystats_enabled != 0) {
2562 int ret = rte_latencystats_init(1, NULL);
2564 printf("Warning: latencystats init()"
2565 " returned error %d\n", ret);
2566 printf("Latencystats running on lcore %d\n",
2567 latencystats_lcore_id);
2571 /* Setup bitrate stats */
2572 #ifdef RTE_LIBRTE_BITRATE
2573 if (bitrate_enabled != 0) {
2574 bitrate_data = rte_stats_bitrate_create();
2575 if (bitrate_data == NULL)
2576 rte_exit(EXIT_FAILURE,
2577 "Could not allocate bitrate data.\n");
2578 rte_stats_bitrate_reg(bitrate_data);
2582 #ifdef RTE_LIBRTE_CMDLINE
2583 if (strlen(cmdline_filename) != 0)
2584 cmdline_read_from_file(cmdline_filename);
2586 if (interactive == 1) {
2588 printf("Start automatic packet forwarding\n");
2589 start_packet_forwarding(0);
2601 printf("No commandline core given, start packet forwarding\n");
2602 start_packet_forwarding(tx_first);
2603 if (stats_period != 0) {
2604 uint64_t prev_time = 0, cur_time, diff_time = 0;
2605 uint64_t timer_period;
2607 /* Convert to number of cycles */
2608 timer_period = stats_period * rte_get_timer_hz();
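		/*
		 * stats_period is given in seconds on the command line; e.g.
		 * with --stats-period 5 the loop below refreshes the port
		 * statistics roughly every 5 seconds until SIGINT/SIGTERM
		 * sets f_quit.
		 */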
2610 while (f_quit == 0) {
2611 cur_time = rte_get_timer_cycles();
2612 diff_time += cur_time - prev_time;
2614 if (diff_time >= timer_period) {
2616 /* Reset the timer */
2619 /* Sleep to avoid unnecessary checks */
2620 prev_time = cur_time;
2625 printf("Press enter to exit\n");
2626 rc = read(0, &c, 1);