1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
66 uint16_t verbose_level = 0; /**< Silent by default. */
67 int testpmd_logtype; /**< Log type for testpmd logs */
69 /* use the master core for the command line? */
70 uint8_t interactive = 0;
71 uint8_t auto_start = 0;
73 char cmdline_filename[PATH_MAX] = {0};
76 * NUMA support configuration.
77 * When set, the NUMA support attempts to dispatch the allocation of the
78 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
79 * probed ports among the CPU sockets 0 and 1.
80 * Otherwise, all memory is allocated from CPU socket 0.
82 uint8_t numa_support = 1; /**< numa enabled by default */
85 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
88 uint8_t socket_num = UMA_NO_CONFIG;
91 * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
96 * Store the specified sockets on which the memory pools used by the ports are allocated.
99 uint8_t port_numa[RTE_MAX_ETHPORTS];
102 * Store the specified sockets on which the RX rings used by the ports are allocated.
105 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
108 * Store the specified sockets on which the TX rings used by the ports are allocated.
111 uint8_t txring_numa[RTE_MAX_ETHPORTS];
114 * Record the Ethernet address of peer target ports to which packets are forwarded.
116 * Must be instantiated with the Ethernet addresses of peer traffic generator ports.
119 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
120 portid_t nb_peer_eth_addrs = 0;
123 * Probed Target Environment.
125 struct rte_port *ports; /**< For all probed ethernet ports. */
126 portid_t nb_ports; /**< Number of probed ethernet ports. */
127 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
128 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
131 * Test Forwarding Configuration.
132 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
133 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
135 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
136 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
137 portid_t nb_cfg_ports; /**< Number of configured ports. */
138 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
140 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
141 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
143 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
144 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
147 * Forwarding engines.
149 struct fwd_engine * fwd_engines[] = {
158 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
160 &softnic_tm_bypass_engine,
162 #ifdef RTE_LIBRTE_IEEE1588
163 &ieee1588_fwd_engine,
168 struct fwd_config cur_fwd_config;
169 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
170 uint32_t retry_enabled;
171 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
172 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
174 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
175 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
176 * specified on command-line. */
177 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
180 * In a container, the process running with the 'stats-period' option cannot be
181 * terminated directly. Set a flag to exit the stats-period loop once SIGINT/SIGTERM is received.
186 * Configuration of packet segments used by the "txonly" processing engine.
188 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
189 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
190 TXONLY_DEF_PACKET_LEN,
192 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
194 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
195 /**< Split policy for packets to TX. */
197 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
198 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
200 /* whether the current configuration is in DCB mode; 0 means it is not */
201 uint8_t dcb_config = 0;
203 /* Whether DCB is in testing status */
204 uint8_t dcb_test = 0;
207 * Configurable number of RX/TX queues.
209 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
210 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
213 * Configurable number of RX/TX ring descriptors.
214 * Defaults are supplied by drivers via ethdev.
216 #define RTE_TEST_RX_DESC_DEFAULT 0
217 #define RTE_TEST_TX_DESC_DEFAULT 0
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
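/*
 * A value of 0 is treated as "unset": the driver-supplied defaults
 * mentioned above are used when the queues are set up.
 */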
221 #define RTE_PMD_PARAM_UNSET -1
223 * Configurable values of RX and TX ring threshold registers.
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
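/*
 * Thresholds left at RTE_PMD_PARAM_UNSET are not overwritten; the driver
 * defaults taken from dev_info.default_rxconf/default_txconf are kept
 * instead (see rxtx_port_config() below).
 */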
235 * Configurable value of RX free threshold.
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
240 * Configurable value of RX drop enable.
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
245 * Configurable value of TX free threshold.
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
250 * Configurable value of TX RS bit threshold.
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
255 * Receive Side Scaling (RSS) configuration.
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
260 * Port topology configuration
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
265 * Avoid flushing all RX streams before starting forwarding.
267 uint8_t no_flush_rx = 0; /* flush by default */
270 * Flow API isolated mode.
272 uint8_t flow_isolate_all;
275 * Avoid checking link status when starting/stopping a port.
277 uint8_t no_link_check = 0; /* check by default */
280 * Enable link status change notification
282 uint8_t lsc_interrupt = 1; /* enabled by default */
285 * Enable device removal notification.
287 uint8_t rmv_interrupt = 1; /* enabled by default */
289 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
292 * Display or mask Ethernet events.
293 * Defaults to all events except VF_MBOX.
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
300 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
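/*
 * Usage sketch (matching eth_event_callback() below): an event of type
 * 'type' is printed only when its bit is set in the mask, i.e. when
 * event_print_mask & (UINT32_C(1) << type) is non-zero.
 */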
303 * NIC bypass mode configuration options.
306 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
307 /* The NIC bypass watchdog timeout. */
308 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
312 #ifdef RTE_LIBRTE_LATENCY_STATS
315 * Set when latency stats are enabled on the command line.
317 uint8_t latencystats_enabled;
320 * Lcore ID that serves latency statistics.
322 lcoreid_t latencystats_lcore_id = -1;
327 * Ethernet device configuration.
329 struct rte_eth_rxmode rx_mode = {
330 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
331 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
332 .ignore_offload_bitfield = 1,
335 struct rte_eth_txmode tx_mode = {
336 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
339 struct rte_fdir_conf fdir_conf = {
340 .mode = RTE_FDIR_MODE_NONE,
341 .pballoc = RTE_FDIR_PBALLOC_64K,
342 .status = RTE_FDIR_REPORT_STATUS,
344 .vlan_tci_mask = 0x0,
346 .src_ip = 0xFFFFFFFF,
347 .dst_ip = 0xFFFFFFFF,
350 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
351 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
353 .src_port_mask = 0xFFFF,
354 .dst_port_mask = 0xFFFF,
355 .mac_addr_byte_mask = 0xFF,
356 .tunnel_type_mask = 1,
357 .tunnel_id_mask = 0xFFFFFFFF,
362 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
364 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
365 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
367 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
368 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
370 uint16_t nb_tx_queue_stats_mappings = 0;
371 uint16_t nb_rx_queue_stats_mappings = 0;
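/*
 * The two mapping arrays above are assumed to be filled from testpmd's
 * queue-stats-mapping command-line options before
 * map_port_queue_stats_mapping_registers() programs the registers.
 */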
374 * When set, hide zero values in the xstats display (they are shown by default).
376 uint8_t xstats_hide_zero;
378 unsigned int num_sockets = 0;
379 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
381 #ifdef RTE_LIBRTE_BITRATE
382 /* Bitrate statistics */
383 struct rte_stats_bitrates *bitrate_data;
384 lcoreid_t bitrate_lcore_id;
385 uint8_t bitrate_enabled;
388 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
389 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
391 /* Forward function declarations */
392 static void map_port_queue_stats_mapping_registers(portid_t pi,
393 struct rte_port *port);
394 static void check_all_ports_link_status(uint32_t port_mask);
395 static int eth_event_callback(portid_t port_id,
396 enum rte_eth_event_type type,
397 void *param, void *ret_param);
398 static void eth_dev_event_callback(char *device_name,
399 enum rte_dev_event_type type,
401 static int eth_dev_event_callback_register(void);
402 static int eth_dev_event_callback_unregister(void);
406 * Check if all the ports are started.
407 * If yes, return positive value. If not, return zero.
409 static int all_ports_started(void);
411 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
412 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
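/* i.e. 1518 - 4 = 1514 bytes with the standard Ethernet maximum frame length. */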
415 * Helper function to check if socket is already discovered.
416 * If yes, return positive value. If not, return zero.
419 new_socket_id(unsigned int socket_id)
423 for (i = 0; i < num_sockets; i++) {
424 if (socket_ids[i] == socket_id)
431 * Setup default configuration.
434 set_default_fwd_lcores_config(void)
438 unsigned int sock_num;
441 for (i = 0; i < RTE_MAX_LCORE; i++) {
442 sock_num = rte_lcore_to_socket_id(i);
443 if (new_socket_id(sock_num)) {
444 if (num_sockets >= RTE_MAX_NUMA_NODES) {
445 rte_exit(EXIT_FAILURE,
446 "Total sockets greater than %u\n",
449 socket_ids[num_sockets++] = sock_num;
451 if (!rte_lcore_is_enabled(i))
453 if (i == rte_get_master_lcore())
455 fwd_lcores_cpuids[nb_lc++] = i;
457 nb_lcores = (lcoreid_t) nb_lc;
458 nb_cfg_lcores = nb_lcores;
463 set_def_peer_eth_addrs(void)
467 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
468 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
469 peer_eth_addrs[i].addr_bytes[5] = i;
474 set_default_fwd_ports_config(void)
479 RTE_ETH_FOREACH_DEV(pt_id)
480 fwd_ports_ids[i++] = pt_id;
482 nb_cfg_ports = nb_ports;
483 nb_fwd_ports = nb_ports;
487 set_def_fwd_config(void)
489 set_default_fwd_lcores_config();
490 set_def_peer_eth_addrs();
491 set_default_fwd_ports_config();
495 * Configuration initialisation done once at init time.
498 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
499 unsigned int socket_id)
501 char pool_name[RTE_MEMPOOL_NAMESIZE];
502 struct rte_mempool *rte_mp = NULL;
505 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
506 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
509 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
510 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
513 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
514 mb_size, (unsigned) mb_mempool_cache,
515 sizeof(struct rte_pktmbuf_pool_private),
520 if (rte_mempool_populate_anon(rte_mp) == 0) {
521 rte_mempool_free(rte_mp);
525 rte_pktmbuf_pool_init(rte_mp, NULL);
526 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
528 /* wrapper to rte_mempool_create() */
529 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
530 rte_mbuf_best_mempool_ops());
531 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
532 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
536 if (rte_mp == NULL) {
537 rte_exit(EXIT_FAILURE,
538 "Creation of mbuf pool for socket %u failed: %s\n",
539 socket_id, rte_strerror(rte_errno));
540 } else if (verbose_level > 0) {
541 rte_mempool_dump(stdout, rte_mp);
546 * Check whether the given socket ID is valid in NUMA mode;
547 * return 0 if valid, -1 otherwise.
550 check_socket_id(const unsigned int socket_id)
552 static int warning_once = 0;
554 if (new_socket_id(socket_id)) {
555 if (!warning_once && numa_support)
556 printf("Warning: NUMA should be configured manually by"
557 " using --port-numa-config and"
558 " --ring-numa-config parameters along with"
567 * Get the allowed maximum number of RX queues.
568 * *pid returns the port ID that has the minimal value of
569 * max_rx_queues among all ports.
572 get_allowed_max_nb_rxq(portid_t *pid)
574 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
576 struct rte_eth_dev_info dev_info;
578 RTE_ETH_FOREACH_DEV(pi) {
579 rte_eth_dev_info_get(pi, &dev_info);
580 if (dev_info.max_rx_queues < allowed_max_rxq) {
581 allowed_max_rxq = dev_info.max_rx_queues;
585 return allowed_max_rxq;
589 * Check whether the given rxq is valid.
590 * It is valid if it does not exceed the maximum number
591 * of RX queues of any port.
592 * Return 0 if valid, -1 otherwise.
595 check_nb_rxq(queueid_t rxq)
597 queueid_t allowed_max_rxq;
600 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
601 if (rxq > allowed_max_rxq) {
602 printf("Fail: input rxq (%u) can't be greater "
603 "than max_rx_queues (%u) of port %u\n",
613 * Get the allowed maximum number of TX queues.
614 * *pid returns the port ID that has the minimal value of
615 * max_tx_queues among all ports.
618 get_allowed_max_nb_txq(portid_t *pid)
620 queueid_t allowed_max_txq = MAX_QUEUE_ID;
622 struct rte_eth_dev_info dev_info;
624 RTE_ETH_FOREACH_DEV(pi) {
625 rte_eth_dev_info_get(pi, &dev_info);
626 if (dev_info.max_tx_queues < allowed_max_txq) {
627 allowed_max_txq = dev_info.max_tx_queues;
631 return allowed_max_txq;
635 * Check whether the given txq is valid.
636 * It is valid if it does not exceed the maximum number
637 * of TX queues of any port.
638 * Return 0 if valid, -1 otherwise.
641 check_nb_txq(queueid_t txq)
643 queueid_t allowed_max_txq;
646 allowed_max_txq = get_allowed_max_nb_txq(&pid);
647 if (txq > allowed_max_txq) {
648 printf("Fail: input txq (%u) can't be greater "
649 "than max_tx_queues (%u) of port %u\n",
662 struct rte_port *port;
663 struct rte_mempool *mbp;
664 unsigned int nb_mbuf_per_pool;
666 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
667 struct rte_gro_param gro_param;
670 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
673 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
674 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
675 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
678 /* Configuration of logical cores. */
679 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
680 sizeof(struct fwd_lcore *) * nb_lcores,
681 RTE_CACHE_LINE_SIZE);
682 if (fwd_lcores == NULL) {
683 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
684 "failed\n", nb_lcores);
686 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
687 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
688 sizeof(struct fwd_lcore),
689 RTE_CACHE_LINE_SIZE);
690 if (fwd_lcores[lc_id] == NULL) {
691 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
694 fwd_lcores[lc_id]->cpuid_idx = lc_id;
697 RTE_ETH_FOREACH_DEV(pid) {
699 /* Apply default TxRx configuration for all ports */
700 port->dev_conf.txmode = tx_mode;
701 port->dev_conf.rxmode = rx_mode;
702 rte_eth_dev_info_get(pid, &port->dev_info);
703 if (!(port->dev_info.tx_offload_capa &
704 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
705 port->dev_conf.txmode.offloads &=
706 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
708 if (port_numa[pid] != NUMA_NO_CONFIG)
709 port_per_socket[port_numa[pid]]++;
711 uint32_t socket_id = rte_eth_dev_socket_id(pid);
713 /* if socket_id is invalid, set to 0 */
714 if (check_socket_id(socket_id) < 0)
716 port_per_socket[socket_id]++;
720 /* set flag to initialize port/queue */
721 port->need_reconfig = 1;
722 port->need_reconfig_queues = 1;
726 * Create mbuf pools.
727 * If NUMA support is disabled, create a single mbuf pool in
728 * socket 0 memory by default.
729 * Otherwise, create an mbuf pool in the memory of each detected socket.
731 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
732 * nb_txd can be reconfigured at run time.
734 if (param_total_num_mbufs)
735 nb_mbuf_per_pool = param_total_num_mbufs;
737 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
738 (nb_lcores * mb_mempool_cache) +
739 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
740 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
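/*
 * Worked example of the default sizing above, assuming the testpmd.h
 * values RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
 * MAX_PKT_BURST = 512 and mb_mempool_cache = 250 with 4 lcores
 * (values quoted here for illustration only):
 *     2048 + (4 * 250) + 2048 + 512 = 5608 mbufs,
 * which is then multiplied by RTE_MAX_ETHPORTS.
 */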
746 for (i = 0; i < num_sockets; i++)
747 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
750 if (socket_num == UMA_NO_CONFIG)
751 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
753 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
759 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
760 DEV_TX_OFFLOAD_GRE_TNL_TSO;
762 * Record which mbuf pool each logical core should use, if needed.
764 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
765 mbp = mbuf_pool_find(
766 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
769 mbp = mbuf_pool_find(0);
770 fwd_lcores[lc_id]->mbp = mbp;
771 /* initialize GSO context */
772 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
773 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
774 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
775 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
777 fwd_lcores[lc_id]->gso_ctx.flag = 0;
780 /* Configuration of packet forwarding streams. */
781 if (init_fwd_streams() < 0)
782 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
786 /* create a gro context for each lcore */
787 gro_param.gro_types = RTE_GRO_TCP_IPV4;
788 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
789 gro_param.max_item_per_flow = MAX_PKT_BURST;
790 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
791 gro_param.socket_id = rte_lcore_to_socket_id(
792 fwd_lcores_cpuids[lc_id]);
793 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
794 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
795 rte_exit(EXIT_FAILURE,
796 "rte_gro_ctx_create() failed\n");
803 reconfig(portid_t new_port_id, unsigned socket_id)
805 struct rte_port *port;
807 /* Reconfiguration of Ethernet ports. */
808 port = &ports[new_port_id];
809 rte_eth_dev_info_get(new_port_id, &port->dev_info);
811 /* set flag to initialize port/queue */
812 port->need_reconfig = 1;
813 port->need_reconfig_queues = 1;
814 port->socket_id = socket_id;
821 init_fwd_streams(void)
824 struct rte_port *port;
825 streamid_t sm_id, nb_fwd_streams_new;
828 /* set socket id according to NUMA mode */
829 RTE_ETH_FOREACH_DEV(pid) {
831 if (nb_rxq > port->dev_info.max_rx_queues) {
832 printf("Fail: nb_rxq(%d) is greater than "
833 "max_rx_queues(%d)\n", nb_rxq,
834 port->dev_info.max_rx_queues);
837 if (nb_txq > port->dev_info.max_tx_queues) {
838 printf("Fail: nb_txq(%d) is greater than "
839 "max_tx_queues(%d)\n", nb_txq,
840 port->dev_info.max_tx_queues);
844 if (port_numa[pid] != NUMA_NO_CONFIG)
845 port->socket_id = port_numa[pid];
847 port->socket_id = rte_eth_dev_socket_id(pid);
849 /* if socket_id is invalid, set to 0 */
850 if (check_socket_id(port->socket_id) < 0)
855 if (socket_num == UMA_NO_CONFIG)
858 port->socket_id = socket_num;
862 q = RTE_MAX(nb_rxq, nb_txq);
864 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
867 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
868 if (nb_fwd_streams_new == nb_fwd_streams)
871 if (fwd_streams != NULL) {
872 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
873 if (fwd_streams[sm_id] == NULL)
875 rte_free(fwd_streams[sm_id]);
876 fwd_streams[sm_id] = NULL;
878 rte_free(fwd_streams);
883 nb_fwd_streams = nb_fwd_streams_new;
884 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
885 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
886 if (fwd_streams == NULL)
887 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
888 "failed\n", nb_fwd_streams);
890 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
891 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
892 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
893 if (fwd_streams[sm_id] == NULL)
894 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
901 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
903 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
905 unsigned int total_burst;
906 unsigned int nb_burst;
907 unsigned int burst_stats[3];
908 uint16_t pktnb_stats[3];
910 int burst_percent[3];
913 * First compute the total number of packet bursts and the
914 * two highest numbers of bursts of the same number of packets.
917 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
918 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
919 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
920 nb_burst = pbs->pkt_burst_spread[nb_pkt];
923 total_burst += nb_burst;
924 if (nb_burst > burst_stats[0]) {
925 burst_stats[1] = burst_stats[0];
926 pktnb_stats[1] = pktnb_stats[0];
927 burst_stats[0] = nb_burst;
928 pktnb_stats[0] = nb_pkt;
931 if (total_burst == 0)
933 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
934 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
935 burst_percent[0], (int) pktnb_stats[0]);
936 if (burst_stats[0] == total_burst) {
940 if (burst_stats[0] + burst_stats[1] == total_burst) {
941 printf(" + %d%% of %d pkts]\n",
942 100 - burst_percent[0], pktnb_stats[1]);
945 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
946 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
947 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
948 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
951 printf(" + %d%% of %d pkts + %d%% of others]\n",
952 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
954 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
957 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
959 struct rte_port *port;
962 static const char *fwd_stats_border = "----------------------";
964 port = &ports[port_id];
965 printf("\n %s Forward statistics for port %-2d %s\n",
966 fwd_stats_border, port_id, fwd_stats_border);
968 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
969 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
971 stats->ipackets, stats->imissed,
972 (uint64_t) (stats->ipackets + stats->imissed));
974 if (cur_fwd_eng == &csum_fwd_engine)
975 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
976 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
977 if ((stats->ierrors + stats->rx_nombuf) > 0) {
978 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
979 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
982 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
984 stats->opackets, port->tx_dropped,
985 (uint64_t) (stats->opackets + port->tx_dropped));
988 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
990 stats->ipackets, stats->imissed,
991 (uint64_t) (stats->ipackets + stats->imissed));
993 if (cur_fwd_eng == &csum_fwd_engine)
994 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
995 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
996 if ((stats->ierrors + stats->rx_nombuf) > 0) {
997 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
998 printf(" RX-nombufs: %14"PRIu64"\n",
1002 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
1004 stats->opackets, port->tx_dropped,
1005 (uint64_t) (stats->opackets + port->tx_dropped));
1008 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1009 if (port->rx_stream)
1010 pkt_burst_stats_display("RX",
1011 &port->rx_stream->rx_burst_stats);
1012 if (port->tx_stream)
1013 pkt_burst_stats_display("TX",
1014 &port->tx_stream->tx_burst_stats);
1017 if (port->rx_queue_stats_mapping_enabled) {
1019 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1020 printf(" Stats reg %2d RX-packets:%14"PRIu64
1021 " RX-errors:%14"PRIu64
1022 " RX-bytes:%14"PRIu64"\n",
1023 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1027 if (port->tx_queue_stats_mapping_enabled) {
1028 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1029 printf(" Stats reg %2d TX-packets:%14"PRIu64
1030 " TX-bytes:%14"PRIu64"\n",
1031 i, stats->q_opackets[i], stats->q_obytes[i]);
1035 printf(" %s--------------------------------%s\n",
1036 fwd_stats_border, fwd_stats_border);
1040 fwd_stream_stats_display(streamid_t stream_id)
1042 struct fwd_stream *fs;
1043 static const char *fwd_top_stats_border = "-------";
1045 fs = fwd_streams[stream_id];
1046 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1047 (fs->fwd_dropped == 0))
1049 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1050 "TX Port=%2d/Queue=%2d %s\n",
1051 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1052 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1053 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1054 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1056 /* if checksum mode */
1057 if (cur_fwd_eng == &csum_fwd_engine) {
1058 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
1059 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1062 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1063 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1064 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1069 flush_fwd_rx_queues(void)
1071 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1078 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1079 uint64_t timer_period;
1081 /* convert to number of cycles */
1082 timer_period = rte_get_timer_hz(); /* 1 second timeout */
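/*
 * Two draining rounds are performed (j < 2 below), presumably so that
 * packets still in flight after the first pass are caught by the second;
 * each per-queue drain is additionally bounded by the 1-second timer above.
 */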
1084 for (j = 0; j < 2; j++) {
1085 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1086 for (rxq = 0; rxq < nb_rxq; rxq++) {
1087 port_id = fwd_ports_ids[rxp];
1089 * testpmd can get stuck in the do/while loop below
1090 * if rte_eth_rx_burst() always returns non-zero
1091 * packets. A timer is therefore added to exit this loop
1092 * after a 1-second timeout.
1094 prev_tsc = rte_rdtsc();
1096 nb_rx = rte_eth_rx_burst(port_id, rxq,
1097 pkts_burst, MAX_PKT_BURST);
1098 for (i = 0; i < nb_rx; i++)
1099 rte_pktmbuf_free(pkts_burst[i]);
1101 cur_tsc = rte_rdtsc();
1102 diff_tsc = cur_tsc - prev_tsc;
1103 timer_tsc += diff_tsc;
1104 } while ((nb_rx > 0) &&
1105 (timer_tsc < timer_period));
1109 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1114 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1116 struct fwd_stream **fsm;
1119 #ifdef RTE_LIBRTE_BITRATE
1120 uint64_t tics_per_1sec;
1121 uint64_t tics_datum;
1122 uint64_t tics_current;
1125 tics_datum = rte_rdtsc();
1126 tics_per_1sec = rte_get_timer_hz();
1128 fsm = &fwd_streams[fc->stream_idx];
1129 nb_fs = fc->stream_nb;
1131 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1132 (*pkt_fwd)(fsm[sm_id]);
1133 #ifdef RTE_LIBRTE_BITRATE
1134 if (bitrate_enabled != 0 &&
1135 bitrate_lcore_id == rte_lcore_id()) {
1136 tics_current = rte_rdtsc();
1137 if (tics_current - tics_datum >= tics_per_1sec) {
1138 /* Periodic bitrate calculation */
1139 RTE_ETH_FOREACH_DEV(idx_port)
1140 rte_stats_bitrate_calc(bitrate_data,
1142 tics_datum = tics_current;
1146 #ifdef RTE_LIBRTE_LATENCY_STATS
1147 if (latencystats_enabled != 0 &&
1148 latencystats_lcore_id == rte_lcore_id())
1149 rte_latencystats_update();
1152 } while (! fc->stopped);
1156 start_pkt_forward_on_core(void *fwd_arg)
1158 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1159 cur_fwd_config.fwd_eng->packet_fwd);
1164 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1165 * Used to start communication flows in network loopback test configurations.
1168 run_one_txonly_burst_on_core(void *fwd_arg)
1170 struct fwd_lcore *fwd_lc;
1171 struct fwd_lcore tmp_lcore;
1173 fwd_lc = (struct fwd_lcore *) fwd_arg;
1174 tmp_lcore = *fwd_lc;
1175 tmp_lcore.stopped = 1;
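/*
 * Running on a copy with 'stopped' already set makes
 * run_pkt_fwd_on_lcore() leave its do/while loop after a single pass,
 * i.e. exactly one burst is sent per stream.
 */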
1176 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1181 * Launch packet forwarding:
1182 * - set up the per-port forwarding context.
1183 * - launch the logical cores with their forwarding configuration.
1186 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1188 port_fwd_begin_t port_fwd_begin;
1193 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1194 if (port_fwd_begin != NULL) {
1195 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1196 (*port_fwd_begin)(fwd_ports_ids[i]);
1198 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1199 lc_id = fwd_lcores_cpuids[i];
1200 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1201 fwd_lcores[i]->stopped = 0;
1202 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1203 fwd_lcores[i], lc_id);
1205 printf("launch lcore %u failed - diag=%d\n",
1212 * Launch packet forwarding configuration.
1215 start_packet_forwarding(int with_tx_first)
1217 port_fwd_begin_t port_fwd_begin;
1218 port_fwd_end_t port_fwd_end;
1219 struct rte_port *port;
1224 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1225 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1227 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1228 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1230 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1231 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1232 (!nb_rxq || !nb_txq))
1233 rte_exit(EXIT_FAILURE,
1234 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1235 cur_fwd_eng->fwd_mode_name);
1237 if (all_ports_started() == 0) {
1238 printf("Not all ports were started\n");
1241 if (test_done == 0) {
1242 printf("Packet forwarding already started\n");
1246 if (init_fwd_streams() < 0) {
1247 printf("Fail from init_fwd_streams()\n");
1252 for (i = 0; i < nb_fwd_ports; i++) {
1253 pt_id = fwd_ports_ids[i];
1254 port = &ports[pt_id];
1255 if (!port->dcb_flag) {
1256 printf("In DCB mode, all forwarding ports must "
1257 "be configured in this mode.\n");
1261 if (nb_fwd_lcores == 1) {
1262 printf("In DCB mode,the nb forwarding cores "
1263 "should be larger than 1.\n");
1270 flush_fwd_rx_queues();
1273 pkt_fwd_config_display(&cur_fwd_config);
1274 rxtx_config_display();
1276 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1277 pt_id = fwd_ports_ids[i];
1278 port = &ports[pt_id];
1279 rte_eth_stats_get(pt_id, &port->stats);
1280 port->tx_dropped = 0;
1282 map_port_queue_stats_mapping_registers(pt_id, port);
1284 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1285 fwd_streams[sm_id]->rx_packets = 0;
1286 fwd_streams[sm_id]->tx_packets = 0;
1287 fwd_streams[sm_id]->fwd_dropped = 0;
1288 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1289 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1291 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1292 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1293 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1294 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1295 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1297 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1298 fwd_streams[sm_id]->core_cycles = 0;
1301 if (with_tx_first) {
1302 port_fwd_begin = tx_only_engine.port_fwd_begin;
1303 if (port_fwd_begin != NULL) {
1304 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1305 (*port_fwd_begin)(fwd_ports_ids[i]);
1307 while (with_tx_first--) {
1308 launch_packet_forwarding(
1309 run_one_txonly_burst_on_core);
1310 rte_eal_mp_wait_lcore();
1312 port_fwd_end = tx_only_engine.port_fwd_end;
1313 if (port_fwd_end != NULL) {
1314 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1315 (*port_fwd_end)(fwd_ports_ids[i]);
1318 launch_packet_forwarding(start_pkt_forward_on_core);
1322 stop_packet_forwarding(void)
1324 struct rte_eth_stats stats;
1325 struct rte_port *port;
1326 port_fwd_end_t port_fwd_end;
1331 uint64_t total_recv;
1332 uint64_t total_xmit;
1333 uint64_t total_rx_dropped;
1334 uint64_t total_tx_dropped;
1335 uint64_t total_rx_nombuf;
1336 uint64_t tx_dropped;
1337 uint64_t rx_bad_ip_csum;
1338 uint64_t rx_bad_l4_csum;
1339 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1340 uint64_t fwd_cycles;
1343 static const char *acc_stats_border = "+++++++++++++++";
1346 printf("Packet forwarding not started\n");
1349 printf("Telling cores to stop...");
1350 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1351 fwd_lcores[lc_id]->stopped = 1;
1352 printf("\nWaiting for lcores to finish...\n");
1353 rte_eal_mp_wait_lcore();
1354 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1355 if (port_fwd_end != NULL) {
1356 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1357 pt_id = fwd_ports_ids[i];
1358 (*port_fwd_end)(pt_id);
1361 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1364 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1365 if (cur_fwd_config.nb_fwd_streams >
1366 cur_fwd_config.nb_fwd_ports) {
1367 fwd_stream_stats_display(sm_id);
1368 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1369 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1371 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1373 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1376 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1377 tx_dropped = (uint64_t) (tx_dropped +
1378 fwd_streams[sm_id]->fwd_dropped);
1379 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1382 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1383 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1384 fwd_streams[sm_id]->rx_bad_ip_csum);
1385 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1389 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1390 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1391 fwd_streams[sm_id]->rx_bad_l4_csum);
1392 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1395 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1396 fwd_cycles = (uint64_t) (fwd_cycles +
1397 fwd_streams[sm_id]->core_cycles);
1402 total_rx_dropped = 0;
1403 total_tx_dropped = 0;
1404 total_rx_nombuf = 0;
1405 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1406 pt_id = fwd_ports_ids[i];
1408 port = &ports[pt_id];
1409 rte_eth_stats_get(pt_id, &stats);
1410 stats.ipackets -= port->stats.ipackets;
1411 port->stats.ipackets = 0;
1412 stats.opackets -= port->stats.opackets;
1413 port->stats.opackets = 0;
1414 stats.ibytes -= port->stats.ibytes;
1415 port->stats.ibytes = 0;
1416 stats.obytes -= port->stats.obytes;
1417 port->stats.obytes = 0;
1418 stats.imissed -= port->stats.imissed;
1419 port->stats.imissed = 0;
1420 stats.oerrors -= port->stats.oerrors;
1421 port->stats.oerrors = 0;
1422 stats.rx_nombuf -= port->stats.rx_nombuf;
1423 port->stats.rx_nombuf = 0;
1425 total_recv += stats.ipackets;
1426 total_xmit += stats.opackets;
1427 total_rx_dropped += stats.imissed;
1428 total_tx_dropped += port->tx_dropped;
1429 total_rx_nombuf += stats.rx_nombuf;
1431 fwd_port_stats_display(pt_id, &stats);
1434 printf("\n %s Accumulated forward statistics for all ports"
1436 acc_stats_border, acc_stats_border);
1437 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1439 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1441 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1442 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1443 if (total_rx_nombuf > 0)
1444 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1445 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1447 acc_stats_border, acc_stats_border);
1448 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1450 printf("\n CPU cycles/packet=%u (total cycles="
1451 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1452 (unsigned int)(fwd_cycles / total_recv),
1453 fwd_cycles, total_recv);
1455 printf("\nDone.\n");
1460 dev_set_link_up(portid_t pid)
1462 if (rte_eth_dev_set_link_up(pid) < 0)
1463 printf("\nSet link up fail.\n");
1467 dev_set_link_down(portid_t pid)
1469 if (rte_eth_dev_set_link_down(pid) < 0)
1470 printf("\nSet link down fail.\n");
1474 all_ports_started(void)
1477 struct rte_port *port;
1479 RTE_ETH_FOREACH_DEV(pi) {
1481 /* Check if there is a port which is not started */
1482 if ((port->port_status != RTE_PORT_STARTED) &&
1483 (port->slave_flag == 0))
1487 /* All ports are started */
1492 port_is_stopped(portid_t port_id)
1494 struct rte_port *port = &ports[port_id];
1496 if ((port->port_status != RTE_PORT_STOPPED) &&
1497 (port->slave_flag == 0))
1503 all_ports_stopped(void)
1507 RTE_ETH_FOREACH_DEV(pi) {
1508 if (!port_is_stopped(pi))
1516 port_is_started(portid_t port_id)
1518 if (port_id_is_invalid(port_id, ENABLED_WARN))
1521 if (ports[port_id].port_status != RTE_PORT_STARTED)
1528 port_is_closed(portid_t port_id)
1530 if (port_id_is_invalid(port_id, ENABLED_WARN))
1533 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1540 start_port(portid_t pid)
1542 int diag, need_check_link_status = -1;
1545 struct rte_port *port;
1546 struct ether_addr mac_addr;
1547 enum rte_eth_event_type event_type;
1549 if (port_id_is_invalid(pid, ENABLED_WARN))
1554 RTE_ETH_FOREACH_DEV(pi) {
1555 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1558 need_check_link_status = 0;
1560 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1561 RTE_PORT_HANDLING) == 0) {
1562 printf("Port %d is now not stopped\n", pi);
1566 if (port->need_reconfig > 0) {
1567 port->need_reconfig = 0;
1569 if (flow_isolate_all) {
1570 int ret = port_flow_isolate(pi, 1);
1572 printf("Failed to apply isolated"
1573 " mode on port %d\n", pi);
1578 printf("Configuring Port %d (socket %u)\n", pi,
1580 /* configure port */
1581 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1584 if (rte_atomic16_cmpset(&(port->port_status),
1585 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1586 printf("Port %d can not be set back "
1587 "to stopped\n", pi);
1588 printf("Fail to configure port %d\n", pi);
1589 /* try to reconfigure port next time */
1590 port->need_reconfig = 1;
1594 if (port->need_reconfig_queues > 0) {
1595 port->need_reconfig_queues = 0;
1596 port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
1597 /* Apply Tx offloads configuration */
1598 port->tx_conf.offloads = port->dev_conf.txmode.offloads;
1599 /* setup tx queues */
1600 for (qi = 0; qi < nb_txq; qi++) {
1601 if ((numa_support) &&
1602 (txring_numa[pi] != NUMA_NO_CONFIG))
1603 diag = rte_eth_tx_queue_setup(pi, qi,
1604 nb_txd, txring_numa[pi],
1607 diag = rte_eth_tx_queue_setup(pi, qi,
1608 nb_txd, port->socket_id,
1614 /* Fail to setup tx queue, return */
1615 if (rte_atomic16_cmpset(&(port->port_status),
1617 RTE_PORT_STOPPED) == 0)
1618 printf("Port %d can not be set back "
1619 "to stopped\n", pi);
1620 printf("Fail to configure port %d tx queues\n", pi);
1621 /* try to reconfigure queues next time */
1622 port->need_reconfig_queues = 1;
1625 /* Apply Rx offloads configuration */
1626 port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1627 /* setup rx queues */
1628 for (qi = 0; qi < nb_rxq; qi++) {
1629 if ((numa_support) &&
1630 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1631 struct rte_mempool *mp =
1632 mbuf_pool_find(rxring_numa[pi]);
1634 printf("Failed to setup RX queue:"
1635 "No mempool allocation"
1636 " on the socket %d\n",
1641 diag = rte_eth_rx_queue_setup(pi, qi,
1642 nb_rxd, rxring_numa[pi],
1643 &(port->rx_conf), mp);
1645 struct rte_mempool *mp =
1646 mbuf_pool_find(port->socket_id);
1648 printf("Failed to setup RX queue:"
1649 "No mempool allocation"
1650 " on the socket %d\n",
1654 diag = rte_eth_rx_queue_setup(pi, qi,
1655 nb_rxd, port->socket_id,
1656 &(port->rx_conf), mp);
1661 /* Fail to setup rx queue, return */
1662 if (rte_atomic16_cmpset(&(port->port_status),
1664 RTE_PORT_STOPPED) == 0)
1665 printf("Port %d can not be set back "
1666 "to stopped\n", pi);
1667 printf("Fail to configure port %d rx queues\n", pi);
1668 /* try to reconfigure queues next time */
1669 port->need_reconfig_queues = 1;
1675 if (rte_eth_dev_start(pi) < 0) {
1676 printf("Fail to start port %d\n", pi);
1678 /* Fail to setup rx queue, return */
1679 if (rte_atomic16_cmpset(&(port->port_status),
1680 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1681 printf("Port %d can not be set back to "
1686 if (rte_atomic16_cmpset(&(port->port_status),
1687 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1688 printf("Port %d can not be set into started\n", pi);
1690 rte_eth_macaddr_get(pi, &mac_addr);
1691 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1692 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1693 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1694 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1696 /* at least one port started, need to check link status */
1697 need_check_link_status = 1;
1700 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1701 event_type < RTE_ETH_EVENT_MAX;
1703 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1708 printf("Failed to setup even callback for event %d\n",
1714 if (need_check_link_status == 1 && !no_link_check)
1715 check_all_ports_link_status(RTE_PORT_ALL);
1716 else if (need_check_link_status == 0)
1717 printf("Please stop the ports first\n");
1724 stop_port(portid_t pid)
1727 struct rte_port *port;
1728 int need_check_link_status = 0;
1735 if (port_id_is_invalid(pid, ENABLED_WARN))
1738 printf("Stopping ports...\n");
1740 RTE_ETH_FOREACH_DEV(pi) {
1741 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1744 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1745 printf("Please remove port %d from forwarding configuration.\n", pi);
1749 if (port_is_bonding_slave(pi)) {
1750 printf("Please remove port %d from bonded device.\n", pi);
1755 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1756 RTE_PORT_HANDLING) == 0)
1759 rte_eth_dev_stop(pi);
1761 if (rte_atomic16_cmpset(&(port->port_status),
1762 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1763 printf("Port %d can not be set into stopped\n", pi);
1764 need_check_link_status = 1;
1766 if (need_check_link_status && !no_link_check)
1767 check_all_ports_link_status(RTE_PORT_ALL);
1773 close_port(portid_t pid)
1776 struct rte_port *port;
1778 if (port_id_is_invalid(pid, ENABLED_WARN))
1781 printf("Closing ports...\n");
1783 RTE_ETH_FOREACH_DEV(pi) {
1784 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1787 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1788 printf("Please remove port %d from forwarding configuration.\n", pi);
1792 if (port_is_bonding_slave(pi)) {
1793 printf("Please remove port %d from bonded device.\n", pi);
1798 if (rte_atomic16_cmpset(&(port->port_status),
1799 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1800 printf("Port %d is already closed\n", pi);
1804 if (rte_atomic16_cmpset(&(port->port_status),
1805 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1806 printf("Port %d is now not stopped\n", pi);
1810 if (port->flow_list)
1811 port_flow_flush(pi);
1812 rte_eth_dev_close(pi);
1814 if (rte_atomic16_cmpset(&(port->port_status),
1815 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1816 printf("Port %d cannot be set to closed\n", pi);
1823 reset_port(portid_t pid)
1827 struct rte_port *port;
1829 if (port_id_is_invalid(pid, ENABLED_WARN))
1832 printf("Resetting ports...\n");
1834 RTE_ETH_FOREACH_DEV(pi) {
1835 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1838 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1839 printf("Please remove port %d from forwarding "
1840 "configuration.\n", pi);
1844 if (port_is_bonding_slave(pi)) {
1845 printf("Please remove port %d from bonded device.\n",
1850 diag = rte_eth_dev_reset(pi);
1853 port->need_reconfig = 1;
1854 port->need_reconfig_queues = 1;
1856 printf("Failed to reset port %d. diag=%d\n", pi, diag);
1864 eth_dev_event_callback_register(void)
1868 /* register the device event callback */
1869 ret = rte_dev_event_callback_register(NULL,
1870 eth_dev_event_callback, NULL);
1872 printf("Failed to register device event callback\n");
1881 eth_dev_event_callback_unregister(void)
1885 /* unregister the device event callback */
1886 ret = rte_dev_event_callback_unregister(NULL,
1887 eth_dev_event_callback, NULL);
1889 printf("Failed to unregister device event callback\n");
1897 attach_port(char *identifier)
1900 unsigned int socket_id;
1902 printf("Attaching a new port...\n");
1904 if (identifier == NULL) {
1905 printf("Invalid parameters are specified\n");
1909 if (rte_eth_dev_attach(identifier, &pi))
1912 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1913 /* if socket_id is invalid, set to 0 */
1914 if (check_socket_id(socket_id) < 0)
1916 reconfig(pi, socket_id);
1917 rte_eth_promiscuous_enable(pi);
1919 nb_ports = rte_eth_dev_count();
1921 ports[pi].port_status = RTE_PORT_STOPPED;
1923 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1928 detach_port(portid_t port_id)
1930 char name[RTE_ETH_NAME_MAX_LEN];
1932 printf("Detaching a port...\n");
1934 if (!port_is_closed(port_id)) {
1935 printf("Please close port first\n");
1939 if (ports[port_id].flow_list)
1940 port_flow_flush(port_id);
1942 if (rte_eth_dev_detach(port_id, name)) {
1943 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1947 nb_ports = rte_eth_dev_count();
1949 printf("Port '%s' is detached. Now total ports is %d\n",
1962 stop_packet_forwarding();
1964 if (ports != NULL) {
1966 RTE_ETH_FOREACH_DEV(pt_id) {
1967 printf("\nShutting down port %d...\n", pt_id);
1975 ret = rte_dev_event_monitor_stop();
1978 "fail to stop device event monitor.");
1980 ret = eth_dev_event_callback_unregister();
1983 "fail to unregister all event callbacks.");
1986 printf("\nBye...\n");
1989 typedef void (*cmd_func_t)(void);
1990 struct pmd_test_command {
1991 const char *cmd_name;
1992 cmd_func_t cmd_func;
1995 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1997 /* Check the link status of all ports for up to 9 s, and finally print it */
1999 check_all_ports_link_status(uint32_t port_mask)
2001 #define CHECK_INTERVAL 100 /* 100ms */
2002 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2004 uint8_t count, all_ports_up, print_flag = 0;
2005 struct rte_eth_link link;
2007 printf("Checking link statuses...\n");
2009 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2011 RTE_ETH_FOREACH_DEV(portid) {
2012 if ((port_mask & (1 << portid)) == 0)
2014 memset(&link, 0, sizeof(link));
2015 rte_eth_link_get_nowait(portid, &link);
2016 /* print link status if flag set */
2017 if (print_flag == 1) {
2018 if (link.link_status)
2020 "Port%d Link Up. speed %u Mbps- %s\n",
2021 portid, link.link_speed,
2022 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2023 ("full-duplex") : ("half-duplex\n"));
2025 printf("Port %d Link Down\n", portid);
2028 /* clear all_ports_up flag if any link down */
2029 if (link.link_status == ETH_LINK_DOWN) {
2034 /* after finally printing all link status, get out */
2035 if (print_flag == 1)
2038 if (all_ports_up == 0) {
2040 rte_delay_ms(CHECK_INTERVAL);
2043 /* set the print_flag if all ports up or timeout */
2044 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2054 rmv_event_callback(void *arg)
2056 struct rte_eth_dev *dev;
2057 portid_t port_id = (intptr_t)arg;
2059 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2060 dev = &rte_eth_devices[port_id];
2063 close_port(port_id);
2064 printf("removing device %s\n", dev->device->name);
2065 if (rte_eal_dev_detach(dev->device))
2066 TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2070 /* This function is used by the interrupt thread */
2072 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2075 static const char * const event_desc[] = {
2076 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2077 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2078 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2079 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2080 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2081 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2082 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2083 [RTE_ETH_EVENT_NEW] = "device probed",
2084 [RTE_ETH_EVENT_DESTROY] = "device released",
2085 [RTE_ETH_EVENT_MAX] = NULL,
2088 RTE_SET_USED(param);
2089 RTE_SET_USED(ret_param);
2091 if (type >= RTE_ETH_EVENT_MAX) {
2092 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
2093 port_id, __func__, type);
2095 } else if (event_print_mask & (UINT32_C(1) << type)) {
2096 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2101 if (port_id_is_invalid(port_id, DISABLED_WARN))
2105 case RTE_ETH_EVENT_INTR_RMV:
2106 if (rte_eal_alarm_set(100000,
2107 rmv_event_callback, (void *)(intptr_t)port_id))
2108 fprintf(stderr, "Could not set up deferred device removal\n");
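/*
 * The actual detach is deferred to an EAL alarm (100 ms) via
 * rmv_event_callback() above, presumably because this callback runs
 * in the interrupt thread and must return quickly.
 */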
2116 /* This function is used by the interrupt thread */
2118 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2119 __rte_unused void *arg)
2121 if (type >= RTE_DEV_EVENT_MAX) {
2122 fprintf(stderr, "%s called upon invalid event %d\n",
2128 case RTE_DEV_EVENT_REMOVE:
2129 RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2131 /* TODO: once failure handling finishes, stop packet
2132 * forwarding, then stop, close and detach the port.
2135 case RTE_DEV_EVENT_ADD:
2136 RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2138 /* TODO: once the kernel driver binding finishes,
2139 * attach the port.
2148 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2152 uint8_t mapping_found = 0;
2154 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2155 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2156 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2157 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2158 tx_queue_stats_mappings[i].queue_id,
2159 tx_queue_stats_mappings[i].stats_counter_id);
2166 port->tx_queue_stats_mapping_enabled = 1;
2171 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2175 uint8_t mapping_found = 0;
2177 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2178 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2179 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2180 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2181 rx_queue_stats_mappings[i].queue_id,
2182 rx_queue_stats_mappings[i].stats_counter_id);
2189 port->rx_queue_stats_mapping_enabled = 1;
2194 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2198 diag = set_tx_queue_stats_mapping_registers(pi, port);
2200 if (diag == -ENOTSUP) {
2201 port->tx_queue_stats_mapping_enabled = 0;
2202 printf("TX queue stats mapping not supported port id=%d\n", pi);
2205 rte_exit(EXIT_FAILURE,
2206 "set_tx_queue_stats_mapping_registers "
2207 "failed for port id=%d diag=%d\n",
2211 diag = set_rx_queue_stats_mapping_registers(pi, port);
2213 if (diag == -ENOTSUP) {
2214 port->rx_queue_stats_mapping_enabled = 0;
2215 printf("RX queue stats mapping not supported port id=%d\n", pi);
2218 rte_exit(EXIT_FAILURE,
2219 "set_rx_queue_stats_mapping_registers "
2220 "failed for port id=%d diag=%d\n",
2226 rxtx_port_config(struct rte_port *port)
2228 port->rx_conf = port->dev_info.default_rxconf;
2229 port->tx_conf = port->dev_info.default_txconf;
2231 /* Check if any RX/TX parameters have been passed */
2232 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2233 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2235 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2236 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2238 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2239 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2241 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2242 port->rx_conf.rx_free_thresh = rx_free_thresh;
2244 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2245 port->rx_conf.rx_drop_en = rx_drop_en;
2247 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2248 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2250 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2251 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2253 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2254 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2256 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2257 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2259 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2260 port->tx_conf.tx_free_thresh = tx_free_thresh;
2264 init_port_config(void)
2267 struct rte_port *port;
2269 RTE_ETH_FOREACH_DEV(pid) {
2271 port->dev_conf.fdir_conf = fdir_conf;
2273 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2274 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2276 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2277 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2280 if (port->dcb_flag == 0) {
2281 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2282 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2284 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2287 rxtx_port_config(port);
2289 rte_eth_macaddr_get(pid, &port->eth_addr);
2291 map_port_queue_stats_mapping_registers(pid, port);
2292 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2293 rte_pmd_ixgbe_bypass_init(pid);
2296 if (lsc_interrupt &&
2297 (rte_eth_devices[pid].data->dev_flags &
2298 RTE_ETH_DEV_INTR_LSC))
2299 port->dev_conf.intr_conf.lsc = 1;
2300 if (rmv_interrupt &&
2301 (rte_eth_devices[pid].data->dev_flags &
2302 RTE_ETH_DEV_INTR_RMV))
2303 port->dev_conf.intr_conf.rmv = 1;
2305 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2306 /* Detect softnic port */
2307 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2308 port->softnic_enable = 1;
2309 memset(&port->softport, 0, sizeof(struct softnic_port));
2311 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2312 port->softport.tm_flag = 1;
2318 void set_port_slave_flag(portid_t slave_pid)
2320 struct rte_port *port;
2322 port = &ports[slave_pid];
2323 port->slave_flag = 1;
2326 void clear_port_slave_flag(portid_t slave_pid)
2328 struct rte_port *port;
2330 port = &ports[slave_pid];
2331 port->slave_flag = 0;
2334 uint8_t port_is_bonding_slave(portid_t slave_pid)
2336 struct rte_port *port;
2338 port = &ports[slave_pid];
2339 return port->slave_flag;
2342 const uint16_t vlan_tags[] = {
2343 0, 1, 2, 3, 4, 5, 6, 7,
2344 8, 9, 10, 11, 12, 13, 14, 15,
2345 16, 17, 18, 19, 20, 21, 22, 23,
2346 24, 25, 26, 27, 28, 29, 30, 31
2350 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2351 enum dcb_mode_enable dcb_mode,
2352 enum rte_eth_nb_tcs num_tcs,
2358 * Builds up the correct configuration for DCB+VT based on the vlan tags array
2359 * given above, and the number of traffic classes available for use.
2361 if (dcb_mode == DCB_VT_ENABLED) {
2362 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2363 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2364 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2365 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2367 /* VMDQ+DCB RX and TX configurations */
2368 vmdq_rx_conf->enable_default_pool = 0;
2369 vmdq_rx_conf->default_pool = 0;
2370 vmdq_rx_conf->nb_queue_pools =
2371 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2372 vmdq_tx_conf->nb_queue_pools =
2373 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2375 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2376 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2377 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2378 vmdq_rx_conf->pool_map[i].pools =
2379 1 << (i % vmdq_rx_conf->nb_queue_pools);
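/*
 * Example: with num_tcs == ETH_4_TCS there are 32 pools, so each of the
 * 32 VLAN tags maps to its own pool (vlan_tags[i] -> pool bitmask 1 << i);
 * with ETH_8_TCS there are 16 pools and only the first 16 tags are mapped.
 */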
2381 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2382 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2383 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2386 /* set DCB mode of RX and TX of multiple queues */
2387 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2388 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2390 struct rte_eth_dcb_rx_conf *rx_conf =
2391 &eth_conf->rx_adv_conf.dcb_rx_conf;
2392 struct rte_eth_dcb_tx_conf *tx_conf =
2393 &eth_conf->tx_adv_conf.dcb_tx_conf;
2395 rx_conf->nb_tcs = num_tcs;
2396 tx_conf->nb_tcs = num_tcs;
2398 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2399 rx_conf->dcb_tc[i] = i % num_tcs;
2400 tx_conf->dcb_tc[i] = i % num_tcs;
2402 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2403 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2404 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2408 eth_conf->dcb_capability_en =
2409 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2411 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2417 init_port_dcb_config(portid_t pid,
2418 enum dcb_mode_enable dcb_mode,
2419 enum rte_eth_nb_tcs num_tcs,
2422 struct rte_eth_conf port_conf;
2423 struct rte_port *rte_port;
2427 rte_port = &ports[pid];
2429 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2430 /* Enter DCB configuration status */
2433 port_conf.rxmode = rte_port->dev_conf.rxmode;
2434 port_conf.txmode = rte_port->dev_conf.txmode;
2436 /* set configuration of DCB in VT mode and DCB in non-VT mode */
2437 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2440 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2443 * Write the configuration into the device.
2444 * Set the numbers of RX & TX queues to 0, so
2445 * the RX & TX queues will not be setup.
2447 rte_eth_dev_configure(pid, 0, 0, &port_conf);
2449 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2451 /* If dev_info.vmdq_pool_base is greater than 0,
2452 * the queue IDs of the VMDq pools start after the PF queues.
2454 if (dcb_mode == DCB_VT_ENABLED &&
2455 rte_port->dev_info.vmdq_pool_base > 0) {
2456 printf("VMDQ_DCB multi-queue mode is nonsensical"
2457 " for port %d.", pid);
2461 /* Assume all ports in testpmd have the same DCB capability
2462 * and the same numbers of rxq and txq in DCB mode
2464 if (dcb_mode == DCB_VT_ENABLED) {
2465 if (rte_port->dev_info.max_vfs > 0) {
2466 nb_rxq = rte_port->dev_info.nb_rx_queues;
2467 nb_txq = rte_port->dev_info.nb_tx_queues;
2469 nb_rxq = rte_port->dev_info.max_rx_queues;
2470 nb_txq = rte_port->dev_info.max_tx_queues;
2473 /* if VT is disabled, use all PF queues */
2474 if (rte_port->dev_info.vmdq_pool_base == 0) {
2475 nb_rxq = rte_port->dev_info.max_rx_queues;
2476 nb_txq = rte_port->dev_info.max_tx_queues;
2478 nb_rxq = (queueid_t)num_tcs;
2479 nb_txq = (queueid_t)num_tcs;
2483 rx_free_thresh = 64;
2485 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2487 rxtx_port_config(rte_port);
2489 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2490 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2491 rx_vft_set(pid, vlan_tags[i], 1);
2493 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2494 map_port_queue_stats_mapping_registers(pid, rte_port);
2496 rte_port->dcb_flag = 1;
2504 /* Configuration of Ethernet ports. */
2505 ports = rte_zmalloc("testpmd: ports",
2506 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2507 RTE_CACHE_LINE_SIZE);
2508 if (ports == NULL) {
2509 rte_exit(EXIT_FAILURE,
2510 "rte_zmalloc(%d struct rte_port) failed\n",
2526 const char clr[] = { 27, '[', '2', 'J', '\0' };
2527 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2529 /* Clear screen (ANSI ESC "[2J") and move the cursor to the top left (ESC "[1;1H") */
2530 printf("%s%s", clr, top_left);
2532 printf("\nPort statistics ====================================");
2533 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2534 nic_stats_display(fwd_ports_ids[i]);
2538 signal_handler(int signum)
2540 if (signum == SIGINT || signum == SIGTERM) {
2541 printf("\nSignal %d received, preparing to exit...\n",
2543 #ifdef RTE_LIBRTE_PDUMP
2544 /* uninitialize packet capture framework */
2547 #ifdef RTE_LIBRTE_LATENCY_STATS
2548 rte_latencystats_uninit();
2551 /* Set flag to indicate the force termination. */
2553 /* exit with the expected status */
2554 signal(signum, SIG_DFL);
2555 kill(getpid(), signum);
2560 main(int argc, char** argv)
2566 signal(SIGINT, signal_handler);
2567 signal(SIGTERM, signal_handler);
2569 diag = rte_eal_init(argc, argv);
2571 rte_panic("Cannot init EAL\n");
2573 testpmd_logtype = rte_log_register("testpmd");
2574 if (testpmd_logtype < 0)
2575 rte_panic("Cannot register log type");
2576 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2578 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2579 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2583 #ifdef RTE_LIBRTE_PDUMP
2584 /* initialize packet capture framework */
2585 rte_pdump_init(NULL);
2588 nb_ports = (portid_t) rte_eth_dev_count();
2590 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2592 /* allocate port structures, and init them */
2595 set_def_fwd_config();
2597 rte_panic("Empty set of forwarding logical cores - check the "
2598 "core mask supplied in the command parameters\n");
2600 /* Bitrate/latency stats disabled by default */
2601 #ifdef RTE_LIBRTE_BITRATE
2602 bitrate_enabled = 0;
2604 #ifdef RTE_LIBRTE_LATENCY_STATS
2605 latencystats_enabled = 0;
2611 launch_args_parse(argc, argv);
2613 if (tx_first && interactive)
2614 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2615 "interactive mode.\n");
2617 if (tx_first && lsc_interrupt) {
2618 printf("Warning: lsc_interrupt needs to be off when "
2619 " using tx_first. Disabling.\n");
2623 if (!nb_rxq && !nb_txq)
2624 printf("Warning: Either rx or tx queues should be non-zero\n");
2626 if (nb_rxq > 1 && nb_rxq > nb_txq)
2627 printf("Warning: nb_rxq=%d enables RSS configuration, "
2628 "but nb_txq=%d will prevent to fully test it.\n",
2634 /* enable hot plug monitoring */
2635 ret = rte_dev_event_monitor_start();
2640 eth_dev_event_callback_register();
2644 if (start_port(RTE_PORT_ALL) != 0)
2645 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2647 /* set all ports to promiscuous mode by default */
2648 RTE_ETH_FOREACH_DEV(port_id)
2649 rte_eth_promiscuous_enable(port_id);
2651 /* Init metrics library */
2652 rte_metrics_init(rte_socket_id());
2654 #ifdef RTE_LIBRTE_LATENCY_STATS
2655 if (latencystats_enabled != 0) {
2656 int ret = rte_latencystats_init(1, NULL);
2658 printf("Warning: latencystats init()"
2659 " returned error %d\n", ret);
2660 printf("Latencystats running on lcore %d\n",
2661 latencystats_lcore_id);
2665 /* Setup bitrate stats */
2666 #ifdef RTE_LIBRTE_BITRATE
2667 if (bitrate_enabled != 0) {
2668 bitrate_data = rte_stats_bitrate_create();
2669 if (bitrate_data == NULL)
2670 rte_exit(EXIT_FAILURE,
2671 "Could not allocate bitrate data.\n");
2672 rte_stats_bitrate_reg(bitrate_data);
2676 #ifdef RTE_LIBRTE_CMDLINE
2677 if (strlen(cmdline_filename) != 0)
2678 cmdline_read_from_file(cmdline_filename);
2680 if (interactive == 1) {
2682 printf("Start automatic packet forwarding\n");
2683 start_packet_forwarding(0);
2695 printf("No commandline core given, start packet forwarding\n");
2696 start_packet_forwarding(tx_first);
2697 if (stats_period != 0) {
2698 uint64_t prev_time = 0, cur_time, diff_time = 0;
2699 uint64_t timer_period;
2701 /* Convert to number of cycles */
2702 timer_period = stats_period * rte_get_timer_hz();
2704 while (f_quit == 0) {
2705 cur_time = rte_get_timer_cycles();
2706 diff_time += cur_time - prev_time;
2708 if (diff_time >= timer_period) {
2710 /* Reset the timer */
2713 /* Sleep to avoid unnecessary checks */
2714 prev_time = cur_time;
2719 printf("Press enter to exit\n");
2720 rc = read(0, &c, 1);