1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
16 #include <sys/queue.h>
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
41 #include <rte_interrupts.h>
43 #include <rte_ether.h>
44 #include <rte_ethdev.h>
46 #include <rte_string_fns.h>
47 #ifdef RTE_LIBRTE_IXGBE_PMD
48 #include <rte_pmd_ixgbe.h>
50 #ifdef RTE_LIBRTE_PDUMP
51 #include <rte_pdump.h>
54 #include <rte_metrics.h>
55 #ifdef RTE_LIBRTE_BITRATE
56 #include <rte_bitrate.h>
58 #ifdef RTE_LIBRTE_LATENCY_STATS
59 #include <rte_latencystats.h>
64 uint16_t verbose_level = 0; /**< Silent by default. */
65 int testpmd_logtype; /**< Log type for testpmd logs */
67 /* Use the master core for the command line? */
68 uint8_t interactive = 0;
69 uint8_t auto_start = 0;
71 char cmdline_filename[PATH_MAX] = {0};
74 * NUMA support configuration.
75 * When set, the NUMA support attempts to dispatch the allocation of the
76 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
77 * probed ports among the CPU sockets 0 and 1.
78 * Otherwise, all memory is allocated from CPU socket 0.
80 uint8_t numa_support = 1; /**< numa enabled by default */
83  * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
86 uint8_t socket_num = UMA_NO_CONFIG;
89  * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
94 * Record the Ethernet address of peer target ports to which packets are
96 * Must be instantiated with the ethernet addresses of peer traffic generator
99 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
100 portid_t nb_peer_eth_addrs = 0;
103 * Probed Target Environment.
105 struct rte_port *ports; /**< For all probed ethernet ports. */
106 portid_t nb_ports; /**< Number of probed ethernet ports. */
107 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
108 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
111 * Test Forwarding Configuration.
112 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
113 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
115 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
116 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
117 portid_t nb_cfg_ports; /**< Number of configured ports. */
118 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
120 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
121 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
123 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
124 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
127 * Forwarding engines.
129 struct fwd_engine * fwd_engines[] = {
138 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
140 &softnic_tm_bypass_engine,
142 #ifdef RTE_LIBRTE_IEEE1588
143 &ieee1588_fwd_engine,
148 struct fwd_config cur_fwd_config;
149 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
150 uint32_t retry_enabled;
151 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
152 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
154 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
155 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
156 * specified on command-line. */
157 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
160  * In a container, the process running with the 'stats-period' option cannot be
161  * terminated normally. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
166 * Configuration of packet segments used by the "txonly" processing engine.
168 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
169 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
170 TXONLY_DEF_PACKET_LEN,
172 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
174 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
175 /**< Split policy for packets to TX. */
177 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
178 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
180 /* whether the current configuration is in DCB mode; 0 means it is not */
181 uint8_t dcb_config = 0;
183 /* Whether DCB is in testing status */
184 uint8_t dcb_test = 0;
187 * Configurable number of RX/TX queues.
189 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
190 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193 * Configurable number of RX/TX ring descriptors.
195 #define RTE_TEST_RX_DESC_DEFAULT 128
196 #define RTE_TEST_TX_DESC_DEFAULT 512
197 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
198 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
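/*
 * Note (assumption, not stated in this file): these descriptor defaults are
 * normally overridden at run time, e.g. via the --rxd/--txd command-line
 * options; see the testpmd parameter handling for the authoritative list.
 */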
200 #define RTE_PMD_PARAM_UNSET -1
202 * Configurable values of RX and TX ring threshold registers.
205 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
206 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
207 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
209 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
210 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214 * Configurable value of RX free threshold.
216 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219 * Configurable value of RX drop enable.
221 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224 * Configurable value of TX free threshold.
226 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229 * Configurable value of TX RS bit threshold.
231 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234 * Receive Side Scaling (RSS) configuration.
236 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
239 * Port topology configuration
241 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
244  * Avoid flushing all the RX streams before starting forwarding.
246 uint8_t no_flush_rx = 0; /* flush by default */
249 * Flow API isolated mode.
251 uint8_t flow_isolate_all;
254  * Avoid checking the link status when starting/stopping a port.
256 uint8_t no_link_check = 0; /* check by default */
259 * Enable link status change notification
261 uint8_t lsc_interrupt = 1; /* enabled by default */
264 * Enable device removal notification.
266 uint8_t rmv_interrupt = 1; /* enabled by default */
269  * Display or mask Ethernet events.
270  * Defaults to all events except VF_MBOX.
272 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
273 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
274 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
275 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
276 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
277 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
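/*
 * Each bit position corresponds to one enum rte_eth_event_type value, and
 * VF_MBOX is intentionally left out of the default mask above. Purely as an
 * illustration (testpmd normally changes this through its own options), VF
 * mailbox events could be made visible with:
 *     event_print_mask |= UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX;
 */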
280 * NIC bypass mode configuration options.
283 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
284 /* The NIC bypass watchdog timeout. */
285 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
289 #ifdef RTE_LIBRTE_LATENCY_STATS
292  * Set when latency stats are enabled on the command line.
294 uint8_t latencystats_enabled;
297  * Lcore ID to serve latency statistics.
299 lcoreid_t latencystats_lcore_id = -1;
304 * Ethernet device configuration.
306 struct rte_eth_rxmode rx_mode = {
307 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
308 .offloads = (DEV_RX_OFFLOAD_VLAN_FILTER |
309 DEV_RX_OFFLOAD_VLAN_STRIP |
310 DEV_RX_OFFLOAD_CRC_STRIP),
311 .ignore_offload_bitfield = 1,
314 struct rte_eth_txmode tx_mode = {
315 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
318 struct rte_fdir_conf fdir_conf = {
319 .mode = RTE_FDIR_MODE_NONE,
320 .pballoc = RTE_FDIR_PBALLOC_64K,
321 .status = RTE_FDIR_REPORT_STATUS,
323 .vlan_tci_mask = 0x0,
325 .src_ip = 0xFFFFFFFF,
326 .dst_ip = 0xFFFFFFFF,
329 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
330 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
332 .src_port_mask = 0xFFFF,
333 .dst_port_mask = 0xFFFF,
334 .mac_addr_byte_mask = 0xFF,
335 .tunnel_type_mask = 1,
336 .tunnel_id_mask = 0xFFFFFFFF,
341 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
343 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
344 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
346 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
347 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
349 uint16_t nb_tx_queue_stats_mappings = 0;
350 uint16_t nb_rx_queue_stats_mappings = 0;
353 * Display zero values by default for xstats
355 uint8_t xstats_hide_zero;
357 unsigned int num_sockets = 0;
358 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
360 #ifdef RTE_LIBRTE_BITRATE
361 /* Bitrate statistics */
362 struct rte_stats_bitrates *bitrate_data;
363 lcoreid_t bitrate_lcore_id;
364 uint8_t bitrate_enabled;
367 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
368 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
370 /* Forward function declarations */
371 static void map_port_queue_stats_mapping_registers(portid_t pi,
372 struct rte_port *port);
373 static void check_all_ports_link_status(uint32_t port_mask);
374 static int eth_event_callback(portid_t port_id,
375 enum rte_eth_event_type type,
376 void *param, void *ret_param);
379 * Check if all the ports are started.
380 * If yes, return positive value. If not, return zero.
382 static int all_ports_started(void);
384 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
385 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
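/*
 * With the usual DPDK values of ETHER_MAX_LEN (1518) and ETHER_CRC_LEN (4)
 * (values assumed here, defined in rte_ether.h), the default GSO segment
 * size works out to 1514 bytes, i.e. a full Ethernet frame without the CRC.
 */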
388  * Helper function to check whether a socket id is new (not yet discovered).
389  * If new, return a positive value. If already known, return zero.
392 new_socket_id(unsigned int socket_id)
396 for (i = 0; i < num_sockets; i++) {
397 if (socket_ids[i] == socket_id)
404 * Setup default configuration.
407 set_default_fwd_lcores_config(void)
411 unsigned int sock_num;
414 for (i = 0; i < RTE_MAX_LCORE; i++) {
415 sock_num = rte_lcore_to_socket_id(i);
416 if (new_socket_id(sock_num)) {
417 if (num_sockets >= RTE_MAX_NUMA_NODES) {
418 rte_exit(EXIT_FAILURE,
419 "Total sockets greater than %u\n",
422 socket_ids[num_sockets++] = sock_num;
424 if (!rte_lcore_is_enabled(i))
426 if (i == rte_get_master_lcore())
428 fwd_lcores_cpuids[nb_lc++] = i;
430 nb_lcores = (lcoreid_t) nb_lc;
431 nb_cfg_lcores = nb_lcores;
436 set_def_peer_eth_addrs(void)
440 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
441 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
442 peer_eth_addrs[i].addr_bytes[5] = i;
447 set_default_fwd_ports_config(void)
452 RTE_ETH_FOREACH_DEV(pt_id)
453 fwd_ports_ids[i++] = pt_id;
455 nb_cfg_ports = nb_ports;
456 nb_fwd_ports = nb_ports;
460 set_def_fwd_config(void)
462 set_default_fwd_lcores_config();
463 set_def_peer_eth_addrs();
464 set_default_fwd_ports_config();
468 * Configuration initialisation done once at init time.
471 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
472 unsigned int socket_id)
474 char pool_name[RTE_MEMPOOL_NAMESIZE];
475 struct rte_mempool *rte_mp = NULL;
478 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
479 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
482 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
483 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
486 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
487 mb_size, (unsigned) mb_mempool_cache,
488 sizeof(struct rte_pktmbuf_pool_private),
493 if (rte_mempool_populate_anon(rte_mp) == 0) {
494 rte_mempool_free(rte_mp);
498 rte_pktmbuf_pool_init(rte_mp, NULL);
499 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
501 /* wrapper to rte_mempool_create() */
502 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
503 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
507 if (rte_mp == NULL) {
508 rte_exit(EXIT_FAILURE,
509 "Creation of mbuf pool for socket %u failed: %s\n",
510 socket_id, rte_strerror(rte_errno));
511 } else if (verbose_level > 0) {
512 rte_mempool_dump(stdout, rte_mp);
517  * Check whether the given socket id is valid in NUMA mode;
518  * return 0 if valid, -1 otherwise.
521 check_socket_id(const unsigned int socket_id)
523 static int warning_once = 0;
525 if (new_socket_id(socket_id)) {
526 if (!warning_once && numa_support)
527 printf("Warning: NUMA should be configured manually by"
528 " using --port-numa-config and"
529 " --ring-numa-config parameters along with"
538  * Get the allowed maximum number of RX queues.
539  * *pid returns the port id that has the minimal value of
540  * max_rx_queues among all ports.
543 get_allowed_max_nb_rxq(portid_t *pid)
545 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
547 struct rte_eth_dev_info dev_info;
549 RTE_ETH_FOREACH_DEV(pi) {
550 rte_eth_dev_info_get(pi, &dev_info);
551 if (dev_info.max_rx_queues < allowed_max_rxq) {
552 allowed_max_rxq = dev_info.max_rx_queues;
556 return allowed_max_rxq;
560  * Check whether the input rxq is valid.
561  * The input rxq is valid if it does not exceed the maximum number
562  * of RX queues supported by every port.
563  * Return 0 if valid, -1 otherwise.
566 check_nb_rxq(queueid_t rxq)
568 queueid_t allowed_max_rxq;
571 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
572 if (rxq > allowed_max_rxq) {
573 printf("Fail: input rxq (%u) can't be greater "
574 "than max_rx_queues (%u) of port %u\n",
587 struct rte_port *port;
588 struct rte_mempool *mbp;
589 unsigned int nb_mbuf_per_pool;
591 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
592 struct rte_gro_param gro_param;
595 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
598 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
599 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
600 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
603 /* Configuration of logical cores. */
604 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
605 sizeof(struct fwd_lcore *) * nb_lcores,
606 RTE_CACHE_LINE_SIZE);
607 if (fwd_lcores == NULL) {
608 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
609 "failed\n", nb_lcores);
611 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
612 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
613 sizeof(struct fwd_lcore),
614 RTE_CACHE_LINE_SIZE);
615 if (fwd_lcores[lc_id] == NULL) {
616 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
619 fwd_lcores[lc_id]->cpuid_idx = lc_id;
622 RTE_ETH_FOREACH_DEV(pid) {
624 /* Apply default Tx configuration for all ports */
625 port->dev_conf.txmode = tx_mode;
626 port->dev_conf.rxmode = rx_mode;
627 rte_eth_dev_info_get(pid, &port->dev_info);
628 if (!(port->dev_info.tx_offload_capa &
629 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
630 port->dev_conf.txmode.offloads &=
631 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
634 if (port_numa[pid] != NUMA_NO_CONFIG)
635 port_per_socket[port_numa[pid]]++;
637 uint32_t socket_id = rte_eth_dev_socket_id(pid);
639 /* if socket_id is invalid, set to 0 */
640 if (check_socket_id(socket_id) < 0)
642 port_per_socket[socket_id]++;
646 /* set flag to initialize port/queue */
647 port->need_reconfig = 1;
648 port->need_reconfig_queues = 1;
652  * Create mbuf pools.
653  * If NUMA support is disabled, create a single mbuf pool in
654  * socket 0 memory by default.
655  * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
657 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
658 * nb_txd can be configured at run time.
660 if (param_total_num_mbufs)
661 nb_mbuf_per_pool = param_total_num_mbufs;
663 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
664 (nb_lcores * mb_mempool_cache) +
665 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
666 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
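/*
 * Rough sizing example, assuming the constants commonly defined in testpmd.h
 * (RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048, MAX_PKT_BURST = 512,
 * DEF_MBUF_CACHE = 250 -- assumed values, not taken from this file) and a
 * single lcore: the per-port budget is 2048 + 250 + 2048 + 512 = 4858 mbufs,
 * which the line above then multiplies by RTE_MAX_ETHPORTS to size the
 * shared pool.
 */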
672 for (i = 0; i < num_sockets; i++)
673 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
676 if (socket_num == UMA_NO_CONFIG)
677 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
679 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
685 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
686 DEV_TX_OFFLOAD_GRE_TNL_TSO;
688  * Record which mbuf pool is used by each logical core, if needed.
690 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
691 mbp = mbuf_pool_find(
692 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
695 mbp = mbuf_pool_find(0);
696 fwd_lcores[lc_id]->mbp = mbp;
697 /* initialize GSO context */
698 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
699 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
700 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
701 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
703 fwd_lcores[lc_id]->gso_ctx.flag = 0;
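/*
 * Each lcore thus gets a private GSO context backed by its NUMA-local mbuf
 * pool (falling back to the socket 0 pool above), used for both the direct
 * and the indirect segment allocations.
 */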
706 /* Configuration of packet forwarding streams. */
707 if (init_fwd_streams() < 0)
708 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
712 /* create a gro context for each lcore */
713 gro_param.gro_types = RTE_GRO_TCP_IPV4;
714 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
715 gro_param.max_item_per_flow = MAX_PKT_BURST;
716 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
717 gro_param.socket_id = rte_lcore_to_socket_id(
718 fwd_lcores_cpuids[lc_id]);
719 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
720 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
721 rte_exit(EXIT_FAILURE,
722 "rte_gro_ctx_create() failed\n");
729 reconfig(portid_t new_port_id, unsigned socket_id)
731 struct rte_port *port;
733 /* Reconfiguration of Ethernet ports. */
734 port = &ports[new_port_id];
735 rte_eth_dev_info_get(new_port_id, &port->dev_info);
737 /* set flag to initialize port/queue */
738 port->need_reconfig = 1;
739 port->need_reconfig_queues = 1;
740 port->socket_id = socket_id;
747 init_fwd_streams(void)
750 struct rte_port *port;
751 streamid_t sm_id, nb_fwd_streams_new;
754 /* set the socket id according to the NUMA configuration */
755 RTE_ETH_FOREACH_DEV(pid) {
757 if (nb_rxq > port->dev_info.max_rx_queues) {
758 printf("Fail: nb_rxq(%d) is greater than "
759 "max_rx_queues(%d)\n", nb_rxq,
760 port->dev_info.max_rx_queues);
763 if (nb_txq > port->dev_info.max_tx_queues) {
764 printf("Fail: nb_txq(%d) is greater than "
765 "max_tx_queues(%d)\n", nb_txq,
766 port->dev_info.max_tx_queues);
770 if (port_numa[pid] != NUMA_NO_CONFIG)
771 port->socket_id = port_numa[pid];
773 port->socket_id = rte_eth_dev_socket_id(pid);
775 /* if socket_id is invalid, set to 0 */
776 if (check_socket_id(port->socket_id) < 0)
781 if (socket_num == UMA_NO_CONFIG)
784 port->socket_id = socket_num;
788 q = RTE_MAX(nb_rxq, nb_txq);
790 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
793 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
794 if (nb_fwd_streams_new == nb_fwd_streams)
797 if (fwd_streams != NULL) {
798 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
799 if (fwd_streams[sm_id] == NULL)
801 rte_free(fwd_streams[sm_id]);
802 fwd_streams[sm_id] = NULL;
804 rte_free(fwd_streams);
809 nb_fwd_streams = nb_fwd_streams_new;
810 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
811 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
812 if (fwd_streams == NULL)
813 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
814 "failed\n", nb_fwd_streams);
816 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
817 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
818 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
819 if (fwd_streams[sm_id] == NULL)
820 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
827 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
829 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
831 unsigned int total_burst;
832 unsigned int nb_burst;
833 unsigned int burst_stats[3];
834 uint16_t pktnb_stats[3];
836 int burst_percent[3];
839 * First compute the total number of packet bursts and the
840 * two highest numbers of bursts of the same number of packets.
843 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
844 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
845 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
846 nb_burst = pbs->pkt_burst_spread[nb_pkt];
849 total_burst += nb_burst;
850 if (nb_burst > burst_stats[0]) {
851 burst_stats[1] = burst_stats[0];
852 pktnb_stats[1] = pktnb_stats[0];
853 burst_stats[0] = nb_burst;
854 pktnb_stats[0] = nb_pkt;
857 if (total_burst == 0)
859 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
860 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
861 burst_percent[0], (int) pktnb_stats[0]);
862 if (burst_stats[0] == total_burst) {
866 if (burst_stats[0] + burst_stats[1] == total_burst) {
867 printf(" + %d%% of %d pkts]\n",
868 100 - burst_percent[0], pktnb_stats[1]);
871 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
872 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
873 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
874 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
877 printf(" + %d%% of %d pkts + %d%% of others]\n",
878 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
880 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
883 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
885 struct rte_port *port;
888 static const char *fwd_stats_border = "----------------------";
890 port = &ports[port_id];
891 printf("\n %s Forward statistics for port %-2d %s\n",
892 fwd_stats_border, port_id, fwd_stats_border);
894 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
895 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
897 stats->ipackets, stats->imissed,
898 (uint64_t) (stats->ipackets + stats->imissed));
900 if (cur_fwd_eng == &csum_fwd_engine)
901 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
902 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
903 if ((stats->ierrors + stats->rx_nombuf) > 0) {
904 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
905 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
908 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
910 stats->opackets, port->tx_dropped,
911 (uint64_t) (stats->opackets + port->tx_dropped));
914 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
916 stats->ipackets, stats->imissed,
917 (uint64_t) (stats->ipackets + stats->imissed));
919 if (cur_fwd_eng == &csum_fwd_engine)
920 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
921 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
922 if ((stats->ierrors + stats->rx_nombuf) > 0) {
923 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
924 printf(" RX-nombufs: %14"PRIu64"\n",
928 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
930 stats->opackets, port->tx_dropped,
931 (uint64_t) (stats->opackets + port->tx_dropped));
934 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
936 pkt_burst_stats_display("RX",
937 &port->rx_stream->rx_burst_stats);
939 pkt_burst_stats_display("TX",
940 &port->tx_stream->tx_burst_stats);
943 if (port->rx_queue_stats_mapping_enabled) {
945 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
946 printf(" Stats reg %2d RX-packets:%14"PRIu64
947 " RX-errors:%14"PRIu64
948 " RX-bytes:%14"PRIu64"\n",
949 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
953 if (port->tx_queue_stats_mapping_enabled) {
954 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
955 printf(" Stats reg %2d TX-packets:%14"PRIu64
956 " TX-bytes:%14"PRIu64"\n",
957 i, stats->q_opackets[i], stats->q_obytes[i]);
961 printf(" %s--------------------------------%s\n",
962 fwd_stats_border, fwd_stats_border);
966 fwd_stream_stats_display(streamid_t stream_id)
968 struct fwd_stream *fs;
969 static const char *fwd_top_stats_border = "-------";
971 fs = fwd_streams[stream_id];
972 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
973 (fs->fwd_dropped == 0))
975 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
976 "TX Port=%2d/Queue=%2d %s\n",
977 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
978 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
979 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
980 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
982 /* if checksum mode */
983 if (cur_fwd_eng == &csum_fwd_engine) {
984 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
985 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
988 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
989 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
990 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
995 flush_fwd_rx_queues(void)
997 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1004 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1005 uint64_t timer_period;
1007 /* convert to number of cycles */
1008 timer_period = rte_get_timer_hz(); /* 1 second timeout */
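/*
 * The RX queues are drained in two passes (the outer j loop below);
 * presumably the second sweep catches packets that were still in flight
 * during the first one, with a short pause in between.
 */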
1010 for (j = 0; j < 2; j++) {
1011 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1012 for (rxq = 0; rxq < nb_rxq; rxq++) {
1013 port_id = fwd_ports_ids[rxp];
1015  * testpmd can get stuck in the do/while loop below
1016  * if rte_eth_rx_burst() always returns nonzero
1017  * packets, so a timer is added to exit this loop
1018  * after the 1-second timer expires.
1020 prev_tsc = rte_rdtsc();
1022 nb_rx = rte_eth_rx_burst(port_id, rxq,
1023 pkts_burst, MAX_PKT_BURST);
1024 for (i = 0; i < nb_rx; i++)
1025 rte_pktmbuf_free(pkts_burst[i]);
1027 cur_tsc = rte_rdtsc();
1028 diff_tsc = cur_tsc - prev_tsc;
1029 timer_tsc += diff_tsc;
1030 } while ((nb_rx > 0) &&
1031 (timer_tsc < timer_period));
1035 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1040 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1042 struct fwd_stream **fsm;
1045 #ifdef RTE_LIBRTE_BITRATE
1046 uint64_t tics_per_1sec;
1047 uint64_t tics_datum;
1048 uint64_t tics_current;
1049 uint8_t idx_port, cnt_ports;
1051 cnt_ports = rte_eth_dev_count();
1052 tics_datum = rte_rdtsc();
1053 tics_per_1sec = rte_get_timer_hz();
1055 fsm = &fwd_streams[fc->stream_idx];
1056 nb_fs = fc->stream_nb;
1058 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1059 (*pkt_fwd)(fsm[sm_id]);
1060 #ifdef RTE_LIBRTE_BITRATE
1061 if (bitrate_enabled != 0 &&
1062 bitrate_lcore_id == rte_lcore_id()) {
1063 tics_current = rte_rdtsc();
1064 if (tics_current - tics_datum >= tics_per_1sec) {
1065 /* Periodic bitrate calculation */
1067 idx_port < cnt_ports;
1069 rte_stats_bitrate_calc(bitrate_data,
1071 tics_datum = tics_current;
1075 #ifdef RTE_LIBRTE_LATENCY_STATS
1076 if (latencystats_enabled != 0 &&
1077 latencystats_lcore_id == rte_lcore_id())
1078 rte_latencystats_update();
1081 } while (! fc->stopped);
1085 start_pkt_forward_on_core(void *fwd_arg)
1087 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1088 cur_fwd_config.fwd_eng->packet_fwd);
1093 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1094 * Used to start communication flows in network loopback test configurations.
1097 run_one_txonly_burst_on_core(void *fwd_arg)
1099 struct fwd_lcore *fwd_lc;
1100 struct fwd_lcore tmp_lcore;
1102 fwd_lc = (struct fwd_lcore *) fwd_arg;
1103 tmp_lcore = *fwd_lc;
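/*
 * Mark the copied lcore context as already stopped so that the do/while
 * loop in run_pkt_fwd_on_lcore() runs exactly once, i.e. a single burst
 * is transmitted.
 */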
1104 tmp_lcore.stopped = 1;
1105 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1110 * Launch packet forwarding:
1111  * - Set up the per-port forwarding context.
1112  * - Launch logical cores with their forwarding configuration.
1115 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1117 port_fwd_begin_t port_fwd_begin;
1122 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1123 if (port_fwd_begin != NULL) {
1124 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1125 (*port_fwd_begin)(fwd_ports_ids[i]);
1127 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1128 lc_id = fwd_lcores_cpuids[i];
1129 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1130 fwd_lcores[i]->stopped = 0;
1131 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1132 fwd_lcores[i], lc_id);
1134 printf("launch lcore %u failed - diag=%d\n",
1141 * Launch packet forwarding configuration.
1144 start_packet_forwarding(int with_tx_first)
1146 port_fwd_begin_t port_fwd_begin;
1147 port_fwd_end_t port_fwd_end;
1148 struct rte_port *port;
1153 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1154 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1156 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1157 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1159 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1160 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1161 (!nb_rxq || !nb_txq))
1162 rte_exit(EXIT_FAILURE,
1163 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1164 cur_fwd_eng->fwd_mode_name);
1166 if (all_ports_started() == 0) {
1167 printf("Not all ports were started\n");
1170 if (test_done == 0) {
1171 printf("Packet forwarding already started\n");
1175 if (init_fwd_streams() < 0) {
1176 printf("Fail from init_fwd_streams()\n");
1181 for (i = 0; i < nb_fwd_ports; i++) {
1182 pt_id = fwd_ports_ids[i];
1183 port = &ports[pt_id];
1184 if (!port->dcb_flag) {
1185 printf("In DCB mode, all forwarding ports must "
1186 "be configured in this mode.\n");
1190 if (nb_fwd_lcores == 1) {
1191 printf("In DCB mode, the number of forwarding cores "
1192 "should be larger than 1.\n");
1199 flush_fwd_rx_queues();
1202 pkt_fwd_config_display(&cur_fwd_config);
1203 rxtx_config_display();
1205 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1206 pt_id = fwd_ports_ids[i];
1207 port = &ports[pt_id];
1208 rte_eth_stats_get(pt_id, &port->stats);
1209 port->tx_dropped = 0;
1211 map_port_queue_stats_mapping_registers(pt_id, port);
1213 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1214 fwd_streams[sm_id]->rx_packets = 0;
1215 fwd_streams[sm_id]->tx_packets = 0;
1216 fwd_streams[sm_id]->fwd_dropped = 0;
1217 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1218 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1220 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1221 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1222 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1223 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1224 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1226 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1227 fwd_streams[sm_id]->core_cycles = 0;
1230 if (with_tx_first) {
1231 port_fwd_begin = tx_only_engine.port_fwd_begin;
1232 if (port_fwd_begin != NULL) {
1233 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1234 (*port_fwd_begin)(fwd_ports_ids[i]);
1236 while (with_tx_first--) {
1237 launch_packet_forwarding(
1238 run_one_txonly_burst_on_core);
1239 rte_eal_mp_wait_lcore();
1241 port_fwd_end = tx_only_engine.port_fwd_end;
1242 if (port_fwd_end != NULL) {
1243 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1244 (*port_fwd_end)(fwd_ports_ids[i]);
1247 launch_packet_forwarding(start_pkt_forward_on_core);
1251 stop_packet_forwarding(void)
1253 struct rte_eth_stats stats;
1254 struct rte_port *port;
1255 port_fwd_end_t port_fwd_end;
1260 uint64_t total_recv;
1261 uint64_t total_xmit;
1262 uint64_t total_rx_dropped;
1263 uint64_t total_tx_dropped;
1264 uint64_t total_rx_nombuf;
1265 uint64_t tx_dropped;
1266 uint64_t rx_bad_ip_csum;
1267 uint64_t rx_bad_l4_csum;
1268 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1269 uint64_t fwd_cycles;
1272 static const char *acc_stats_border = "+++++++++++++++";
1275 printf("Packet forwarding not started\n");
1278 printf("Telling cores to stop...");
1279 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1280 fwd_lcores[lc_id]->stopped = 1;
1281 printf("\nWaiting for lcores to finish...\n");
1282 rte_eal_mp_wait_lcore();
1283 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1284 if (port_fwd_end != NULL) {
1285 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1286 pt_id = fwd_ports_ids[i];
1287 (*port_fwd_end)(pt_id);
1290 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1293 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1294 if (cur_fwd_config.nb_fwd_streams >
1295 cur_fwd_config.nb_fwd_ports) {
1296 fwd_stream_stats_display(sm_id);
1297 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1298 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1300 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1302 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1305 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1306 tx_dropped = (uint64_t) (tx_dropped +
1307 fwd_streams[sm_id]->fwd_dropped);
1308 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1311 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1312 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1313 fwd_streams[sm_id]->rx_bad_ip_csum);
1314 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1318 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1319 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1320 fwd_streams[sm_id]->rx_bad_l4_csum);
1321 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1324 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1325 fwd_cycles = (uint64_t) (fwd_cycles +
1326 fwd_streams[sm_id]->core_cycles);
1331 total_rx_dropped = 0;
1332 total_tx_dropped = 0;
1333 total_rx_nombuf = 0;
1334 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1335 pt_id = fwd_ports_ids[i];
1337 port = &ports[pt_id];
1338 rte_eth_stats_get(pt_id, &stats);
1339 stats.ipackets -= port->stats.ipackets;
1340 port->stats.ipackets = 0;
1341 stats.opackets -= port->stats.opackets;
1342 port->stats.opackets = 0;
1343 stats.ibytes -= port->stats.ibytes;
1344 port->stats.ibytes = 0;
1345 stats.obytes -= port->stats.obytes;
1346 port->stats.obytes = 0;
1347 stats.imissed -= port->stats.imissed;
1348 port->stats.imissed = 0;
1349 stats.oerrors -= port->stats.oerrors;
1350 port->stats.oerrors = 0;
1351 stats.rx_nombuf -= port->stats.rx_nombuf;
1352 port->stats.rx_nombuf = 0;
1354 total_recv += stats.ipackets;
1355 total_xmit += stats.opackets;
1356 total_rx_dropped += stats.imissed;
1357 total_tx_dropped += port->tx_dropped;
1358 total_rx_nombuf += stats.rx_nombuf;
1360 fwd_port_stats_display(pt_id, &stats);
1363 printf("\n %s Accumulated forward statistics for all ports"
1365 acc_stats_border, acc_stats_border);
1366 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1368 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1370 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1371 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1372 if (total_rx_nombuf > 0)
1373 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1374 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1376 acc_stats_border, acc_stats_border);
1377 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1379 printf("\n CPU cycles/packet=%u (total cycles="
1380 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1381 (unsigned int)(fwd_cycles / total_recv),
1382 fwd_cycles, total_recv);
1384 printf("\nDone.\n");
1389 dev_set_link_up(portid_t pid)
1391 if (rte_eth_dev_set_link_up(pid) < 0)
1392 printf("\nSet link up fail.\n");
1396 dev_set_link_down(portid_t pid)
1398 if (rte_eth_dev_set_link_down(pid) < 0)
1399 printf("\nSet link down fail.\n");
1403 all_ports_started(void)
1406 struct rte_port *port;
1408 RTE_ETH_FOREACH_DEV(pi) {
1410 /* Check if there is a port which is not started */
1411 if ((port->port_status != RTE_PORT_STARTED) &&
1412 (port->slave_flag == 0))
1416 /* No unstarted port was found */
1421 port_is_stopped(portid_t port_id)
1423 struct rte_port *port = &ports[port_id];
1425 if ((port->port_status != RTE_PORT_STOPPED) &&
1426 (port->slave_flag == 0))
1432 all_ports_stopped(void)
1436 RTE_ETH_FOREACH_DEV(pi) {
1437 if (!port_is_stopped(pi))
1445 port_is_started(portid_t port_id)
1447 if (port_id_is_invalid(port_id, ENABLED_WARN))
1450 if (ports[port_id].port_status != RTE_PORT_STARTED)
1457 port_is_closed(portid_t port_id)
1459 if (port_id_is_invalid(port_id, ENABLED_WARN))
1462 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1469 start_port(portid_t pid)
1471 int diag, need_check_link_status = -1;
1474 struct rte_port *port;
1475 struct ether_addr mac_addr;
1476 enum rte_eth_event_type event_type;
1478 if (port_id_is_invalid(pid, ENABLED_WARN))
1483 RTE_ETH_FOREACH_DEV(pi) {
1484 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1487 need_check_link_status = 0;
1489 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1490 RTE_PORT_HANDLING) == 0) {
1491 printf("Port %d is now not stopped\n", pi);
1495 if (port->need_reconfig > 0) {
1496 port->need_reconfig = 0;
1498 if (flow_isolate_all) {
1499 int ret = port_flow_isolate(pi, 1);
1501 printf("Failed to apply isolated"
1502 " mode on port %d\n", pi);
1507 printf("Configuring Port %d (socket %u)\n", pi,
1509 /* configure port */
1510 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1513 if (rte_atomic16_cmpset(&(port->port_status),
1514 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1515 printf("Port %d can not be set back "
1516 "to stopped\n", pi);
1517 printf("Fail to configure port %d\n", pi);
1518 /* try to reconfigure port next time */
1519 port->need_reconfig = 1;
1523 if (port->need_reconfig_queues > 0) {
1524 port->need_reconfig_queues = 0;
1525 port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
1526 /* Apply Tx offloads configuration */
1527 port->tx_conf.offloads = port->dev_conf.txmode.offloads;
1528 /* setup tx queues */
1529 for (qi = 0; qi < nb_txq; qi++) {
1530 if ((numa_support) &&
1531 (txring_numa[pi] != NUMA_NO_CONFIG))
1532 diag = rte_eth_tx_queue_setup(pi, qi,
1533 nb_txd,txring_numa[pi],
1536 diag = rte_eth_tx_queue_setup(pi, qi,
1537 nb_txd,port->socket_id,
1543 /* Failed to set up a tx queue; return */
1544 if (rte_atomic16_cmpset(&(port->port_status),
1546 RTE_PORT_STOPPED) == 0)
1547 printf("Port %d can not be set back "
1548 "to stopped\n", pi);
1549 printf("Fail to configure port %d tx queues\n", pi);
1550 /* try to reconfigure queues next time */
1551 port->need_reconfig_queues = 1;
1554 /* Apply Rx offloads configuration */
1555 port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1556 /* setup rx queues */
1557 for (qi = 0; qi < nb_rxq; qi++) {
1558 if ((numa_support) &&
1559 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1560 struct rte_mempool * mp =
1561 mbuf_pool_find(rxring_numa[pi]);
1563 printf("Failed to setup RX queue: "
1564 "No mempool allocation"
1565 " on the socket %d\n",
1570 diag = rte_eth_rx_queue_setup(pi, qi,
1571 nb_rxd,rxring_numa[pi],
1572 &(port->rx_conf),mp);
1574 struct rte_mempool *mp =
1575 mbuf_pool_find(port->socket_id);
1577 printf("Failed to setup RX queue: "
1578 "No mempool allocation"
1579 " on the socket %d\n",
1583 diag = rte_eth_rx_queue_setup(pi, qi,
1584 nb_rxd,port->socket_id,
1585 &(port->rx_conf), mp);
1590 /* Failed to set up an rx queue; return */
1591 if (rte_atomic16_cmpset(&(port->port_status),
1593 RTE_PORT_STOPPED) == 0)
1594 printf("Port %d can not be set back "
1595 "to stopped\n", pi);
1596 printf("Fail to configure port %d rx queues\n", pi);
1597 /* try to reconfigure queues next time */
1598 port->need_reconfig_queues = 1;
1604 if (rte_eth_dev_start(pi) < 0) {
1605 printf("Fail to start port %d\n", pi);
1607 /* Failed to start the port; roll back to the stopped state */
1608 if (rte_atomic16_cmpset(&(port->port_status),
1609 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1610 printf("Port %d can not be set back to "
1615 if (rte_atomic16_cmpset(&(port->port_status),
1616 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1617 printf("Port %d can not be set into started\n", pi);
1619 rte_eth_macaddr_get(pi, &mac_addr);
1620 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1621 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1622 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1623 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1625 /* at least one port started, need to check link status */
1626 need_check_link_status = 1;
1629 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1630 event_type < RTE_ETH_EVENT_MAX;
1632 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1637 printf("Failed to setup event callback for event %d\n",
1643 if (need_check_link_status == 1 && !no_link_check)
1644 check_all_ports_link_status(RTE_PORT_ALL);
1645 else if (need_check_link_status == 0)
1646 printf("Please stop the ports first\n");
1653 stop_port(portid_t pid)
1656 struct rte_port *port;
1657 int need_check_link_status = 0;
1664 if (port_id_is_invalid(pid, ENABLED_WARN))
1667 printf("Stopping ports...\n");
1669 RTE_ETH_FOREACH_DEV(pi) {
1670 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1673 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1674 printf("Please remove port %d from forwarding configuration.\n", pi);
1678 if (port_is_bonding_slave(pi)) {
1679 printf("Please remove port %d from bonded device.\n", pi);
1684 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1685 RTE_PORT_HANDLING) == 0)
1688 rte_eth_dev_stop(pi);
1690 if (rte_atomic16_cmpset(&(port->port_status),
1691 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1692 printf("Port %d can not be set into stopped\n", pi);
1693 need_check_link_status = 1;
1695 if (need_check_link_status && !no_link_check)
1696 check_all_ports_link_status(RTE_PORT_ALL);
1702 close_port(portid_t pid)
1705 struct rte_port *port;
1707 if (port_id_is_invalid(pid, ENABLED_WARN))
1710 printf("Closing ports...\n");
1712 RTE_ETH_FOREACH_DEV(pi) {
1713 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1716 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1717 printf("Please remove port %d from forwarding configuration.\n", pi);
1721 if (port_is_bonding_slave(pi)) {
1722 printf("Please remove port %d from bonded device.\n", pi);
1727 if (rte_atomic16_cmpset(&(port->port_status),
1728 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1729 printf("Port %d is already closed\n", pi);
1733 if (rte_atomic16_cmpset(&(port->port_status),
1734 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1735 printf("Port %d is now not stopped\n", pi);
1739 if (port->flow_list)
1740 port_flow_flush(pi);
1741 rte_eth_dev_close(pi);
1743 if (rte_atomic16_cmpset(&(port->port_status),
1744 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1745 printf("Port %d cannot be set to closed\n", pi);
1752 reset_port(portid_t pid)
1756 struct rte_port *port;
1758 if (port_id_is_invalid(pid, ENABLED_WARN))
1761 printf("Resetting ports...\n");
1763 RTE_ETH_FOREACH_DEV(pi) {
1764 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1767 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1768 printf("Please remove port %d from forwarding "
1769 "configuration.\n", pi);
1773 if (port_is_bonding_slave(pi)) {
1774 printf("Please remove port %d from bonded device.\n",
1779 diag = rte_eth_dev_reset(pi);
1782 port->need_reconfig = 1;
1783 port->need_reconfig_queues = 1;
1785 printf("Failed to reset port %d. diag=%d\n", pi, diag);
1793 attach_port(char *identifier)
1796 unsigned int socket_id;
1798 printf("Attaching a new port...\n");
1800 if (identifier == NULL) {
1801 printf("Invalid parameters are specified\n");
1805 if (rte_eth_dev_attach(identifier, &pi))
1808 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1809 /* if socket_id is invalid, set to 0 */
1810 if (check_socket_id(socket_id) < 0)
1812 reconfig(pi, socket_id);
1813 rte_eth_promiscuous_enable(pi);
1815 nb_ports = rte_eth_dev_count();
1817 ports[pi].port_status = RTE_PORT_STOPPED;
1819 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1824 detach_port(portid_t port_id)
1826 char name[RTE_ETH_NAME_MAX_LEN];
1828 printf("Detaching a port...\n");
1830 if (!port_is_closed(port_id)) {
1831 printf("Please close port first\n");
1835 if (ports[port_id].flow_list)
1836 port_flow_flush(port_id);
1838 if (rte_eth_dev_detach(port_id, name)) {
1839 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1843 nb_ports = rte_eth_dev_count();
1845 printf("Port '%s' is detached. Now total ports is %d\n",
1857 stop_packet_forwarding();
1859 if (ports != NULL) {
1861 RTE_ETH_FOREACH_DEV(pt_id) {
1862 printf("\nShutting down port %d...\n", pt_id);
1868 printf("\nBye...\n");
1871 typedef void (*cmd_func_t)(void);
1872 struct pmd_test_command {
1873 const char *cmd_name;
1874 cmd_func_t cmd_func;
1877 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1879 /* Check the link status of all ports for up to 9 s, and print the final status */
1881 check_all_ports_link_status(uint32_t port_mask)
1883 #define CHECK_INTERVAL 100 /* 100ms */
1884 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1886 uint8_t count, all_ports_up, print_flag = 0;
1887 struct rte_eth_link link;
1889 printf("Checking link statuses...\n");
1891 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1893 RTE_ETH_FOREACH_DEV(portid) {
1894 if ((port_mask & (1 << portid)) == 0)
1896 memset(&link, 0, sizeof(link));
1897 rte_eth_link_get_nowait(portid, &link);
1898 /* print link status if flag set */
1899 if (print_flag == 1) {
1900 if (link.link_status)
1902 "Port %d Link Up. Speed %u Mbps - %s\n",
1903 portid, link.link_speed,
1904 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1905 ("full-duplex") : ("half-duplex"));
1907 printf("Port %d Link Down\n", portid);
1910 /* clear all_ports_up flag if any link down */
1911 if (link.link_status == ETH_LINK_DOWN) {
1916 /* after printing the final link status of all ports, get out */
1917 if (print_flag == 1)
1920 if (all_ports_up == 0) {
1922 rte_delay_ms(CHECK_INTERVAL);
1925 /* set the print_flag if all ports up or timeout */
1926 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1936 rmv_event_callback(void *arg)
1938 struct rte_eth_dev *dev;
1939 portid_t port_id = (intptr_t)arg;
1941 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1942 dev = &rte_eth_devices[port_id];
1945 close_port(port_id);
1946 printf("removing device %s\n", dev->device->name);
1947 if (rte_eal_dev_detach(dev->device))
1948 TESTPMD_LOG(ERR, "Failed to detach device %s\n",
1952 /* This function is used by the interrupt thread */
1954 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1957 static const char * const event_desc[] = {
1958 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1959 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1960 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1961 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1962 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1963 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1964 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1965 [RTE_ETH_EVENT_NEW] = "device probed",
1966 [RTE_ETH_EVENT_DESTROY] = "device released",
1967 [RTE_ETH_EVENT_MAX] = NULL,
1970 RTE_SET_USED(param);
1971 RTE_SET_USED(ret_param);
1973 if (type >= RTE_ETH_EVENT_MAX) {
1974 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1975 port_id, __func__, type);
1977 } else if (event_print_mask & (UINT32_C(1) << type)) {
1978 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1984 case RTE_ETH_EVENT_INTR_RMV:
1985 if (rte_eal_alarm_set(100000,
1986 rmv_event_callback, (void *)(intptr_t)port_id))
1987 fprintf(stderr, "Could not set up deferred device removal\n");
1996 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2000 uint8_t mapping_found = 0;
2002 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2003 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2004 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2005 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2006 tx_queue_stats_mappings[i].queue_id,
2007 tx_queue_stats_mappings[i].stats_counter_id);
2014 port->tx_queue_stats_mapping_enabled = 1;
2019 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2023 uint8_t mapping_found = 0;
2025 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2026 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2027 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2028 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2029 rx_queue_stats_mappings[i].queue_id,
2030 rx_queue_stats_mappings[i].stats_counter_id);
2037 port->rx_queue_stats_mapping_enabled = 1;
2042 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2046 diag = set_tx_queue_stats_mapping_registers(pi, port);
2048 if (diag == -ENOTSUP) {
2049 port->tx_queue_stats_mapping_enabled = 0;
2050 printf("TX queue stats mapping not supported port id=%d\n", pi);
2053 rte_exit(EXIT_FAILURE,
2054 "set_tx_queue_stats_mapping_registers "
2055 "failed for port id=%d diag=%d\n",
2059 diag = set_rx_queue_stats_mapping_registers(pi, port);
2061 if (diag == -ENOTSUP) {
2062 port->rx_queue_stats_mapping_enabled = 0;
2063 printf("RX queue stats mapping not supported port id=%d\n", pi);
2066 rte_exit(EXIT_FAILURE,
2067 "set_rx_queue_stats_mapping_registers "
2068 "failed for port id=%d diag=%d\n",
2074 rxtx_port_config(struct rte_port *port)
2076 port->rx_conf = port->dev_info.default_rxconf;
2077 port->tx_conf = port->dev_info.default_txconf;
2079 /* Check if any RX/TX parameters have been passed */
2080 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2081 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2083 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2084 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2086 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2087 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2089 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2090 port->rx_conf.rx_free_thresh = rx_free_thresh;
2092 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2093 port->rx_conf.rx_drop_en = rx_drop_en;
2095 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2096 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2098 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2099 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2101 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2102 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2104 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2105 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2107 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2108 port->tx_conf.tx_free_thresh = tx_free_thresh;
2112 init_port_config(void)
2115 struct rte_port *port;
2117 RTE_ETH_FOREACH_DEV(pid) {
2119 port->dev_conf.fdir_conf = fdir_conf;
2121 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2122 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2124 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2125 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2128 if (port->dcb_flag == 0) {
2129 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2130 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2132 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2135 rxtx_port_config(port);
2137 rte_eth_macaddr_get(pid, &port->eth_addr);
2139 map_port_queue_stats_mapping_registers(pid, port);
2140 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2141 rte_pmd_ixgbe_bypass_init(pid);
2144 if (lsc_interrupt &&
2145 (rte_eth_devices[pid].data->dev_flags &
2146 RTE_ETH_DEV_INTR_LSC))
2147 port->dev_conf.intr_conf.lsc = 1;
2148 if (rmv_interrupt &&
2149 (rte_eth_devices[pid].data->dev_flags &
2150 RTE_ETH_DEV_INTR_RMV))
2151 port->dev_conf.intr_conf.rmv = 1;
2153 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2154 /* Detect softnic port */
2155 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2156 port->softnic_enable = 1;
2157 memset(&port->softport, 0, sizeof(struct softnic_port));
2159 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2160 port->softport.tm_flag = 1;
2166 void set_port_slave_flag(portid_t slave_pid)
2168 struct rte_port *port;
2170 port = &ports[slave_pid];
2171 port->slave_flag = 1;
2174 void clear_port_slave_flag(portid_t slave_pid)
2176 struct rte_port *port;
2178 port = &ports[slave_pid];
2179 port->slave_flag = 0;
2182 uint8_t port_is_bonding_slave(portid_t slave_pid)
2184 struct rte_port *port;
2186 port = &ports[slave_pid];
2187 return port->slave_flag;
2190 const uint16_t vlan_tags[] = {
2191 0, 1, 2, 3, 4, 5, 6, 7,
2192 8, 9, 10, 11, 12, 13, 14, 15,
2193 16, 17, 18, 19, 20, 21, 22, 23,
2194 24, 25, 26, 27, 28, 29, 30, 31
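/*
 * 32 tags: enough for get_eth_dcb_conf() below to give every VMDQ pool its
 * own VLAN id (at most ETH_32_POOLS pool mappings are filled in).
 */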
2198 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2199 enum dcb_mode_enable dcb_mode,
2200 enum rte_eth_nb_tcs num_tcs,
2206 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2207 * given above, and the number of traffic classes available for use.
2209 if (dcb_mode == DCB_VT_ENABLED) {
2210 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2211 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2212 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2213 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2215 /* VMDQ+DCB RX and TX configurations */
2216 vmdq_rx_conf->enable_default_pool = 0;
2217 vmdq_rx_conf->default_pool = 0;
2218 vmdq_rx_conf->nb_queue_pools =
2219 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2220 vmdq_tx_conf->nb_queue_pools =
2221 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2223 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2224 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2225 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2226 vmdq_rx_conf->pool_map[i].pools =
2227 1 << (i % vmdq_rx_conf->nb_queue_pools);
2229 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2230 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2231 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2234 /* set DCB mode of RX and TX of multiple queues */
2235 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2236 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2238 struct rte_eth_dcb_rx_conf *rx_conf =
2239 ð_conf->rx_adv_conf.dcb_rx_conf;
2240 struct rte_eth_dcb_tx_conf *tx_conf =
2241 ð_conf->tx_adv_conf.dcb_tx_conf;
2243 rx_conf->nb_tcs = num_tcs;
2244 tx_conf->nb_tcs = num_tcs;
2246 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2247 rx_conf->dcb_tc[i] = i % num_tcs;
2248 tx_conf->dcb_tc[i] = i % num_tcs;
2250 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2251 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2252 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2256 eth_conf->dcb_capability_en =
2257 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2259 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2265 init_port_dcb_config(portid_t pid,
2266 enum dcb_mode_enable dcb_mode,
2267 enum rte_eth_nb_tcs num_tcs,
2270 struct rte_eth_conf port_conf;
2271 struct rte_port *rte_port;
2275 rte_port = &ports[pid];
2277 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2278 /* Enter DCB configuration status */
2281 /* set configuration of DCB in VT mode and DCB in non-VT mode */
2282 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2285 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2288 * Write the configuration into the device.
2289 * Set the numbers of RX & TX queues to 0, so
2290  * the RX & TX queues will not be set up.
2292 rte_eth_dev_configure(pid, 0, 0, &port_conf);
2294 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2296 /* If dev_info.vmdq_pool_base is greater than 0,
2297  * the queue ids of the vmdq pools start after the pf queues.
2299 if (dcb_mode == DCB_VT_ENABLED &&
2300 rte_port->dev_info.vmdq_pool_base > 0) {
2301 printf("VMDQ_DCB multi-queue mode is nonsensical"
2302 " for port %d.", pid);
2306 /* Assume the ports in testpmd have the same dcb capability
2307  * and the same number of rxq and txq in dcb mode
2309 if (dcb_mode == DCB_VT_ENABLED) {
2310 if (rte_port->dev_info.max_vfs > 0) {
2311 nb_rxq = rte_port->dev_info.nb_rx_queues;
2312 nb_txq = rte_port->dev_info.nb_tx_queues;
2314 nb_rxq = rte_port->dev_info.max_rx_queues;
2315 nb_txq = rte_port->dev_info.max_tx_queues;
2318 /* if VT is disabled, use all pf queues */
2319 if (rte_port->dev_info.vmdq_pool_base == 0) {
2320 nb_rxq = rte_port->dev_info.max_rx_queues;
2321 nb_txq = rte_port->dev_info.max_tx_queues;
2323 nb_rxq = (queueid_t)num_tcs;
2324 nb_txq = (queueid_t)num_tcs;
2328 rx_free_thresh = 64;
2330 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2332 rxtx_port_config(rte_port);
2334 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2335 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2336 rx_vft_set(pid, vlan_tags[i], 1);
2338 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2339 map_port_queue_stats_mapping_registers(pid, rte_port);
2341 rte_port->dcb_flag = 1;
2349 /* Configuration of Ethernet ports. */
2350 ports = rte_zmalloc("testpmd: ports",
2351 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2352 RTE_CACHE_LINE_SIZE);
2353 if (ports == NULL) {
2354 rte_exit(EXIT_FAILURE,
2355 "rte_zmalloc(%d struct rte_port) failed\n",
2371 const char clr[] = { 27, '[', '2', 'J', '\0' };
2372 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2374 /* Clear screen and move to top left */
2375 printf("%s%s", clr, top_left);
2377 printf("\nPort statistics ====================================");
2378 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2379 nic_stats_display(fwd_ports_ids[i]);
2383 signal_handler(int signum)
2385 if (signum == SIGINT || signum == SIGTERM) {
2386 printf("\nSignal %d received, preparing to exit...\n",
2388 #ifdef RTE_LIBRTE_PDUMP
2389 /* uninitialize packet capture framework */
2392 #ifdef RTE_LIBRTE_LATENCY_STATS
2393 rte_latencystats_uninit();
2396 /* Set flag to indicate forced termination. */
2398 /* exit with the expected status */
2399 signal(signum, SIG_DFL);
2400 kill(getpid(), signum);
2405 main(int argc, char** argv)
2410 signal(SIGINT, signal_handler);
2411 signal(SIGTERM, signal_handler);
2413 diag = rte_eal_init(argc, argv);
2415 rte_panic("Cannot init EAL\n");
2417 testpmd_logtype = rte_log_register("testpmd");
2418 if (testpmd_logtype < 0)
2419 rte_panic("Cannot register log type");
2420 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2422 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2423 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2427 #ifdef RTE_LIBRTE_PDUMP
2428 /* initialize packet capture framework */
2429 rte_pdump_init(NULL);
2432 nb_ports = (portid_t) rte_eth_dev_count();
2434 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2436 /* allocate port structures, and init them */
2439 set_def_fwd_config();
2441 rte_panic("Empty set of forwarding logical cores - check the "
2442 "core mask supplied in the command parameters\n");
2444 /* Bitrate/latency stats disabled by default */
2445 #ifdef RTE_LIBRTE_BITRATE
2446 bitrate_enabled = 0;
2448 #ifdef RTE_LIBRTE_LATENCY_STATS
2449 latencystats_enabled = 0;
2455 launch_args_parse(argc, argv);
2457 if (tx_first && interactive)
2458 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2459 "interactive mode.\n");
2461 if (tx_first && lsc_interrupt) {
2462 printf("Warning: lsc_interrupt needs to be off when "
2463 "using tx_first. Disabling.\n");
2467 if (!nb_rxq && !nb_txq)
2468 printf("Warning: Either rx or tx queues should be non-zero\n");
2470 if (nb_rxq > 1 && nb_rxq > nb_txq)
2471 printf("Warning: nb_rxq=%d enables RSS configuration, "
2472 "but nb_txq=%d will prevent it from being fully tested.\n",
2476 if (start_port(RTE_PORT_ALL) != 0)
2477 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2479 /* set all ports to promiscuous mode by default */
2480 RTE_ETH_FOREACH_DEV(port_id)
2481 rte_eth_promiscuous_enable(port_id);
2483 /* Init metrics library */
2484 rte_metrics_init(rte_socket_id());
2486 #ifdef RTE_LIBRTE_LATENCY_STATS
2487 if (latencystats_enabled != 0) {
2488 int ret = rte_latencystats_init(1, NULL);
2490 printf("Warning: latencystats init()"
2491 " returned error %d\n", ret);
2492 printf("Latencystats running on lcore %d\n",
2493 latencystats_lcore_id);
2497 /* Setup bitrate stats */
2498 #ifdef RTE_LIBRTE_BITRATE
2499 if (bitrate_enabled != 0) {
2500 bitrate_data = rte_stats_bitrate_create();
2501 if (bitrate_data == NULL)
2502 rte_exit(EXIT_FAILURE,
2503 "Could not allocate bitrate data.\n");
2504 rte_stats_bitrate_reg(bitrate_data);
2508 #ifdef RTE_LIBRTE_CMDLINE
2509 if (strlen(cmdline_filename) != 0)
2510 cmdline_read_from_file(cmdline_filename);
2512 if (interactive == 1) {
2514 printf("Start automatic packet forwarding\n");
2515 start_packet_forwarding(0);
2527 printf("No commandline core given, start packet forwarding\n");
2528 start_packet_forwarding(tx_first);
2529 if (stats_period != 0) {
2530 uint64_t prev_time = 0, cur_time, diff_time = 0;
2531 uint64_t timer_period;
2533 /* Convert to number of cycles */
2534 timer_period = stats_period * rte_get_timer_hz();
2536 while (f_quit == 0) {
2537 cur_time = rte_get_timer_cycles();
2538 diff_time += cur_time - prev_time;
2540 if (diff_time >= timer_period) {
2542 /* Reset the timer */
2545 /* Sleep to avoid unnecessary checks */
2546 prev_time = cur_time;
2551 printf("Press enter to exit\n");
2552 rc = read(0, &c, 1);