1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
12 #ifndef RTE_EXEC_ENV_WINDOWS
15 #include <sys/types.h>
19 #include <sys/queue.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_byteorder.h>
30 #include <rte_debug.h>
31 #include <rte_cycles.h>
32 #include <rte_memory.h>
33 #include <rte_memcpy.h>
34 #include <rte_launch.h>
36 #include <rte_alarm.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
51 #include <rte_pmd_ixgbe.h>
54 #include <rte_pdump.h>
57 #ifdef RTE_LIB_METRICS
58 #include <rte_metrics.h>
60 #ifdef RTE_LIB_BITRATESTATS
61 #include <rte_bitrate.h>
63 #ifdef RTE_LIB_LATENCYSTATS
64 #include <rte_latencystats.h>
66 #ifdef RTE_EXEC_ENV_WINDOWS
70 #include <rte_eth_bond.h>
76 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
77 #define HUGE_FLAG (0x40000)
79 #define HUGE_FLAG MAP_HUGETLB
82 #ifndef MAP_HUGE_SHIFT
83 /* older kernels (or FreeBSD) will not have this define */
84 #define HUGE_SHIFT (26)
86 #define HUGE_SHIFT MAP_HUGE_SHIFT
89 #define EXTMEM_HEAP_NAME "extmem"
91 * Zone size with the malloc overhead (max of debug and release variants)
92 * must fit into the smallest supported hugepage size (2M),
93 * so that an IOVA-contiguous zone of this size can always be allocated
94 * if there are free 2M hugepages.
96 #define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
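/*
 * Illustrative arithmetic, assuming the common 64-byte cache line:
 * 2M - 4 * 64 = 2097152 - 256 = 2096896 bytes, which leaves room for the
 * malloc element header while still fitting in a single 2M hugepage.
 */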
98 uint16_t verbose_level = 0; /**< Silent by default. */
99 int testpmd_logtype; /**< Log type for testpmd logs */
101 /* Use the main core for the command line? */
102 uint8_t interactive = 0;
103 uint8_t auto_start = 0;
105 char cmdline_filename[PATH_MAX] = {0};
108 * NUMA support configuration.
109 * When set, the NUMA support attempts to dispatch the allocation of the
110 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
111 * probed ports among the CPU sockets 0 and 1.
112 * Otherwise, all memory is allocated from CPU socket 0.
114 uint8_t numa_support = 1; /**< numa enabled by default */
117 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
120 uint8_t socket_num = UMA_NO_CONFIG;
123 * Select mempool allocation type:
124 * - native: use regular DPDK memory
125 * - anon: use regular DPDK memory to create mempool, but populate using
126 * anonymous memory (may not be IOVA-contiguous)
127 * - xmem: use externally allocated hugepage memory
129 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
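/*
 * The allocation type above is selected at run time via the testpmd
 * command-line option --mp-alloc=<native|anon|xmem|xmemhuge>.
 */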
132 * Store the specified sockets on which the memory pool used by each port
135 uint8_t port_numa[RTE_MAX_ETHPORTS];
138 * Store the specified sockets on which the RX ring used by each port
141 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
144 * Store the specified sockets on which the TX ring used by each port
147 uint8_t txring_numa[RTE_MAX_ETHPORTS];
150 * Record the Ethernet address of peer target ports to which packets are
152 * Must be instantiated with the Ethernet addresses of peer traffic generator
155 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
156 portid_t nb_peer_eth_addrs = 0;
159 * Probed Target Environment.
161 struct rte_port *ports; /**< For all probed ethernet ports. */
162 portid_t nb_ports; /**< Number of probed ethernet ports. */
163 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
164 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
166 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
169 * Test Forwarding Configuration.
170 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
171 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
173 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
174 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
175 portid_t nb_cfg_ports; /**< Number of configured ports. */
176 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
178 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
179 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
181 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
182 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
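/* For example, 2 probed ports with nb_rxq == 4 give nb_fwd_streams == 8. */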
185 * Forwarding engines.
187 struct fwd_engine *fwd_engines[] = {
197 &five_tuple_swap_fwd_engine,
198 #ifdef RTE_LIBRTE_IEEE1588
199 &ieee1588_fwd_engine,
205 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
206 uint16_t mempool_flags;
208 struct fwd_config cur_fwd_config;
209 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
210 uint32_t retry_enabled;
211 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
212 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
214 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
215 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
216 DEFAULT_MBUF_DATA_SIZE
217 }; /**< Mbuf data space size. */
218 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
219 * specified on command-line. */
220 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
222 /** Extended statistics to show. */
223 struct rte_eth_xstat_name *xstats_display;
225 unsigned int xstats_display_num; /**< Number of extended statistics to show */
228 * In a container, the process running with the 'stats-period' option cannot
229 * be terminated directly. Set a flag to exit the stats period loop once SIGINT/SIGTERM is received.
234 * Max Rx frame size, set by '--max-pkt-len' parameter.
236 uint32_t max_rx_pkt_len;
239 * Configuration of packet segments used to scatter received packets
240 * if any of the split features is configured.
242 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
243 uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
244 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
245 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
248 * Configuration of packet segments used by the "txonly" processing engine.
250 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
251 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
252 TXONLY_DEF_PACKET_LEN,
254 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
256 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
257 /**< Split policy for packets to TX. */
259 uint8_t txonly_multi_flow;
260 /**< Whether multiple flows are generated in TXONLY mode. */
262 uint32_t tx_pkt_times_inter;
263 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
265 uint32_t tx_pkt_times_intra;
266 /**< Timings for send scheduling in TXONLY mode, time between packets. */
268 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
269 uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
270 int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
271 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
273 /* Whether the current configuration is in DCB mode; 0 means it is not */
274 uint8_t dcb_config = 0;
277 * Configurable number of RX/TX queues.
279 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
280 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
281 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
284 * Configurable number of RX/TX ring descriptors.
285 * Defaults are supplied by drivers via ethdev.
287 #define RTE_TEST_RX_DESC_DEFAULT 0
288 #define RTE_TEST_TX_DESC_DEFAULT 0
289 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
290 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
292 #define RTE_PMD_PARAM_UNSET -1
294 * Configurable values of RX and TX ring threshold registers.
297 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
298 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
299 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
301 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
302 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
303 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
306 * Configurable value of RX free threshold.
308 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
311 * Configurable value of RX drop enable.
313 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
316 * Configurable value of TX free threshold.
318 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
321 * Configurable value of TX RS bit threshold.
323 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
326 * Configurable number of packets to buffer before sending.
328 uint16_t noisy_tx_sw_bufsz;
331 * Configurable value of packet buffer timeout.
333 uint16_t noisy_tx_sw_buf_flush_time;
336 * Configurable value for size of VNF internal memory area
337 * used for simulating noisy neighbour behaviour
339 uint64_t noisy_lkup_mem_sz;
342 * Configurable value of number of random writes done in
343 * VNF simulation memory area.
345 uint64_t noisy_lkup_num_writes;
348 * Configurable value of number of random reads done in
349 * VNF simulation memory area.
351 uint64_t noisy_lkup_num_reads;
354 * Configurable value of number of random reads/writes done in
355 * VNF simulation memory area.
357 uint64_t noisy_lkup_num_reads_writes;
360 * Receive Side Scaling (RSS) configuration.
362 uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
365 * Port topology configuration
367 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
370 * Avoid flushing all the RX streams before starting forwarding.
372 uint8_t no_flush_rx = 0; /* flush by default */
375 * Flow API isolated mode.
377 uint8_t flow_isolate_all;
380 * Avoid checking the link status when starting/stopping a port.
382 uint8_t no_link_check = 0; /* check by default */
385 * Don't automatically start all ports in interactive mode.
387 uint8_t no_device_start = 0;
390 * Enable link status change notification
392 uint8_t lsc_interrupt = 1; /* enabled by default */
395 * Enable device removal notification.
397 uint8_t rmv_interrupt = 1; /* enabled by default */
399 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
401 /* After attach, port setup is called on event or by iterator */
402 bool setup_on_probe_event = true;
404 /* Clear ptypes on port initialization. */
405 uint8_t clear_ptypes = true;
407 /* Hairpin ports configuration mode. */
408 uint16_t hairpin_mode;
410 /* Pretty printing of ethdev events */
411 static const char * const eth_event_desc[] = {
412 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
413 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
414 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
415 [RTE_ETH_EVENT_INTR_RESET] = "reset",
416 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
417 [RTE_ETH_EVENT_IPSEC] = "IPsec",
418 [RTE_ETH_EVENT_MACSEC] = "MACsec",
419 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
420 [RTE_ETH_EVENT_NEW] = "device probed",
421 [RTE_ETH_EVENT_DESTROY] = "device released",
422 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
423 [RTE_ETH_EVENT_MAX] = NULL,
427 * Display or mask ether events.
428 * Default to all events except VF_MBOX.
430 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
431 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
432 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
433 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
434 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
435 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
436 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
437 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
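/*
 * For illustration, a masked event can be re-enabled by setting its bit,
 * e.g.: event_print_mask |= UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX;
 */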
439 * Decide if all memory is locked for performance.
444 * NIC bypass mode configuration options.
447 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
448 /* The NIC bypass watchdog timeout. */
449 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
453 #ifdef RTE_LIB_LATENCYSTATS
456 * Set when latency stats are enabled on the command line
458 uint8_t latencystats_enabled;
461 * Lcore ID to service latency statistics.
463 lcoreid_t latencystats_lcore_id = -1;
468 * Ethernet device configuration.
470 struct rte_eth_rxmode rx_mode;
472 struct rte_eth_txmode tx_mode = {
473 .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
476 struct rte_eth_fdir_conf fdir_conf = {
477 .mode = RTE_FDIR_MODE_NONE,
478 .pballoc = RTE_ETH_FDIR_PBALLOC_64K,
479 .status = RTE_FDIR_REPORT_STATUS,
481 .vlan_tci_mask = 0xFFEF,
483 .src_ip = 0xFFFFFFFF,
484 .dst_ip = 0xFFFFFFFF,
487 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
488 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
490 .src_port_mask = 0xFFFF,
491 .dst_port_mask = 0xFFFF,
492 .mac_addr_byte_mask = 0xFF,
493 .tunnel_type_mask = 1,
494 .tunnel_id_mask = 0xFFFFFFFF,
499 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
502 * Display zero values by default for xstats
504 uint8_t xstats_hide_zero;
507 * Measurement of CPU cycles is disabled by default
509 uint8_t record_core_cycles;
512 * Display of RX and TX bursts is disabled by default
514 uint8_t record_burst_stats;
517 * Number of ports per shared Rx queue group; 0 to disable.
521 unsigned int num_sockets = 0;
522 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
524 #ifdef RTE_LIB_BITRATESTATS
525 /* Bitrate statistics */
526 struct rte_stats_bitrates *bitrate_data;
527 lcoreid_t bitrate_lcore_id;
528 uint8_t bitrate_enabled;
532 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
533 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
537 * Hexadecimal bitmask of RX mq modes that can be enabled.
539 enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
542 * Used to set forced link speed
544 uint32_t eth_link_speed;
547 * ID of the current process in multi-process, used to
548 * configure the queues to be polled.
553 * Number of processes in multi-process, used to
554 * configure the queues to be polled.
556 unsigned int num_procs = 1;
559 eth_rx_metadata_negotiate_mp(uint16_t port_id)
561 uint64_t rx_meta_features = 0;
564 if (!is_proc_primary())
567 rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
568 rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
569 rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
571 ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
573 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
574 TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
578 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
579 TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
583 if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
584 TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
587 } else if (ret != -ENOTSUP) {
588 rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
589 port_id, rte_strerror(-ret));
594 eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
595 const struct rte_eth_conf *dev_conf)
597 if (is_proc_primary())
598 return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
604 change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
608 portid_t slave_pids[RTE_MAX_ETHPORTS];
609 struct rte_port *port;
614 num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
616 if (num_slaves < 0) {
617 fprintf(stderr, "Failed to get slave list for port = %u\n",
622 for (i = 0; i < num_slaves; i++) {
623 slave_pid = slave_pids[i];
624 port = &ports[slave_pid];
626 is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
629 RTE_SET_USED(bond_pid);
630 RTE_SET_USED(is_stop);
636 eth_dev_start_mp(uint16_t port_id)
640 if (is_proc_primary()) {
641 ret = rte_eth_dev_start(port_id);
645 struct rte_port *port = &ports[port_id];
648 * Starting a bonded port also starts all slaves under the bonded
649 * device. So if this port is a bonded device, we need to modify the
650 * port status of these slaves.
652 if (port->bond_flag == 1)
653 return change_bonding_slave_port_status(port_id, false);
660 eth_dev_stop_mp(uint16_t port_id)
664 if (is_proc_primary()) {
665 ret = rte_eth_dev_stop(port_id);
669 struct rte_port *port = &ports[port_id];
672 * Stopping a bonded port also stops all slaves under the bonded
673 * device. So if this port is a bonded device, we need to modify the
674 * port status of these slaves.
676 if (port->bond_flag == 1)
677 return change_bonding_slave_port_status(port_id, true);
684 mempool_free_mp(struct rte_mempool *mp)
686 if (is_proc_primary())
687 rte_mempool_free(mp);
691 eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
693 if (is_proc_primary())
694 return rte_eth_dev_set_mtu(port_id, mtu);
699 /* Forward function declarations */
700 static void setup_attached_port(portid_t pi);
701 static void check_all_ports_link_status(uint32_t port_mask);
702 static int eth_event_callback(portid_t port_id,
703 enum rte_eth_event_type type,
704 void *param, void *ret_param);
705 static void dev_event_callback(const char *device_name,
706 enum rte_dev_event_type type,
708 static void fill_xstats_display_info(void);
711 * Check if all the ports are started.
712 * If yes, return positive value. If not, return zero.
714 static int all_ports_started(void);
717 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
718 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
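/* i.e. 1518 - 4 = 1514 bytes with the standard Ethernet maximum frame length */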
721 /* Holds the registered mbuf dynamic flags names. */
722 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
726 * Helper function to check if a socket is new (not yet discovered).
727 * If new, return a positive value. If already discovered, return zero.
730 new_socket_id(unsigned int socket_id)
734 for (i = 0; i < num_sockets; i++) {
735 if (socket_ids[i] == socket_id)
742 * Setup default configuration.
745 set_default_fwd_lcores_config(void)
749 unsigned int sock_num;
752 for (i = 0; i < RTE_MAX_LCORE; i++) {
753 if (!rte_lcore_is_enabled(i))
755 sock_num = rte_lcore_to_socket_id(i);
756 if (new_socket_id(sock_num)) {
757 if (num_sockets >= RTE_MAX_NUMA_NODES) {
758 rte_exit(EXIT_FAILURE,
759 "Total sockets greater than %u\n",
762 socket_ids[num_sockets++] = sock_num;
764 if (i == rte_get_main_lcore())
766 fwd_lcores_cpuids[nb_lc++] = i;
768 nb_lcores = (lcoreid_t) nb_lc;
769 nb_cfg_lcores = nb_lcores;
774 set_def_peer_eth_addrs(void)
778 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
779 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
780 peer_eth_addrs[i].addr_bytes[5] = i;
785 set_default_fwd_ports_config(void)
790 RTE_ETH_FOREACH_DEV(pt_id) {
791 fwd_ports_ids[i++] = pt_id;
793 /* Update sockets info according to the attached device */
794 int socket_id = rte_eth_dev_socket_id(pt_id);
795 if (socket_id >= 0 && new_socket_id(socket_id)) {
796 if (num_sockets >= RTE_MAX_NUMA_NODES) {
797 rte_exit(EXIT_FAILURE,
798 "Total sockets greater than %u\n",
801 socket_ids[num_sockets++] = socket_id;
805 nb_cfg_ports = nb_ports;
806 nb_fwd_ports = nb_ports;
810 set_def_fwd_config(void)
812 set_default_fwd_lcores_config();
813 set_def_peer_eth_addrs();
814 set_default_fwd_ports_config();
817 #ifndef RTE_EXEC_ENV_WINDOWS
818 /* extremely pessimistic estimation of memory required to create a mempool */
820 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
822 unsigned int n_pages, mbuf_per_pg, leftover;
823 uint64_t total_mem, mbuf_mem, obj_sz;
825 /* there is no good way to predict how much space the mempool will
826 * occupy because it will allocate chunks on the fly, and some of those
827 * will come from default DPDK memory while some will come from our
828 * external memory, so just assume 128MB will be enough for everyone.
830 uint64_t hdr_mem = 128 << 20;
832 /* account for possible non-contiguousness */
833 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
835 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
839 mbuf_per_pg = pgsz / obj_sz;
840 leftover = (nb_mbufs % mbuf_per_pg) > 0;
841 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
843 mbuf_mem = n_pages * pgsz;
845 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
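/*
 * Worked example with illustrative numbers only: for nb_mbufs = 1024,
 * obj_sz = 2560 and pgsz = 2M, mbuf_per_pg = 819, n_pages = 2 and
 * mbuf_mem = 4M, giving total_mem = 128M + 4M = 132M.
 */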
847 if (total_mem > SIZE_MAX) {
848 TESTPMD_LOG(ERR, "Memory size too big\n");
851 *out = (size_t)total_mem;
857 pagesz_flags(uint64_t page_sz)
859 /* as per mmap() manpage, all page sizes are log2 of page size
860 * shifted by MAP_HUGE_SHIFT
862 int log2 = rte_log2_u64(page_sz);
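/*
 * Worked example, assuming 2M pages on Linux: rte_log2_u64(RTE_PGSIZE_2M)
 * is 21, and 21 << MAP_HUGE_SHIFT (26) equals MAP_HUGE_2MB.
 */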
864 return (log2 << HUGE_SHIFT);
868 alloc_mem(size_t memsz, size_t pgsz, bool huge)
873 /* allocate anonymous hugepages */
874 flags = MAP_ANONYMOUS | MAP_PRIVATE;
876 flags |= HUGE_FLAG | pagesz_flags(pgsz);
878 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
879 if (addr == MAP_FAILED)
885 struct extmem_param {
889 rte_iova_t *iova_table;
890 unsigned int iova_table_len;
894 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
897 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
898 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
899 unsigned int cur_page, n_pages, pgsz_idx;
900 size_t mem_sz, cur_pgsz;
901 rte_iova_t *iovas = NULL;
905 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
906 /* skip anything that is too big */
907 if (pgsizes[pgsz_idx] > SIZE_MAX)
910 cur_pgsz = pgsizes[pgsz_idx];
912 /* if we were told not to allocate hugepages, override */
914 cur_pgsz = sysconf(_SC_PAGESIZE);
916 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
918 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
922 /* allocate our memory */
923 addr = alloc_mem(mem_sz, cur_pgsz, huge);
925 /* if we couldn't allocate memory with a specified page size,
926 * that doesn't mean we can't do it with other page sizes, so
932 /* store IOVA addresses for every page in this memory area */
933 n_pages = mem_sz / cur_pgsz;
935 iovas = malloc(sizeof(*iovas) * n_pages);
938 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
941 /* lock memory if it's not huge pages */
945 /* populate IOVA addresses */
946 for (cur_page = 0; cur_page < n_pages; cur_page++) {
951 offset = cur_pgsz * cur_page;
952 cur = RTE_PTR_ADD(addr, offset);
954 /* touch the page before getting its IOVA */
955 *(volatile char *)cur = 0;
957 iova = rte_mem_virt2iova(cur);
959 iovas[cur_page] = iova;
964 /* if we couldn't allocate anything */
970 param->pgsz = cur_pgsz;
971 param->iova_table = iovas;
972 param->iova_table_len = n_pages;
978 munmap(addr, mem_sz);
984 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
986 struct extmem_param param;
989 memset(&param, 0, sizeof(param));
991 /* check if our heap exists */
992 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
994 /* create our heap */
995 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
997 TESTPMD_LOG(ERR, "Cannot create heap\n");
1002 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
1004 TESTPMD_LOG(ERR, "Cannot create memory area\n");
1008 /* we now have a valid memory area, so add it to heap */
1009 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
1010 param.addr, param.len, param.iova_table,
1011 param.iova_table_len, param.pgsz);
1013 /* when using VFIO, memory is automatically mapped for DMA by EAL */
1015 /* not needed any more */
1016 free(param.iova_table);
1019 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
1020 munmap(param.addr, param.len);
1026 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
1032 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
1033 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
1038 RTE_ETH_FOREACH_DEV(pid) {
1039 struct rte_eth_dev_info dev_info;
1041 ret = eth_dev_info_get_print_err(pid, &dev_info);
1044 "unable to get device info for port %d on addr 0x%p,"
1045 "mempool unmapping will not be performed\n",
1050 ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
1053 "unable to DMA unmap addr 0x%p "
1055 memhdr->addr, dev_info.device->name);
1058 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
1061 "unable to un-register addr 0x%p\n", memhdr->addr);
1066 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
1067 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
1070 size_t page_size = sysconf(_SC_PAGESIZE);
1073 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
1077 "unable to register addr 0x%p\n", memhdr->addr);
1080 RTE_ETH_FOREACH_DEV(pid) {
1081 struct rte_eth_dev_info dev_info;
1083 ret = eth_dev_info_get_print_err(pid, &dev_info);
1086 "unable to get device info for port %d on addr 0x%p,"
1087 "mempool mapping will not be performed\n",
1091 ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
1094 "unable to DMA map addr 0x%p "
1096 memhdr->addr, dev_info.device->name);
1103 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
1104 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
1106 struct rte_pktmbuf_extmem *xmem;
1107 unsigned int ext_num, zone_num, elt_num;
1110 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
1111 elt_num = EXTBUF_ZONE_SIZE / elt_size;
1112 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
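/*
 * Illustrative numbers: with mbuf_sz = 2176 (already a multiple of a
 * 64-byte cache line), elt_num = 2096896 / 2176 = 963 elements per zone,
 * so nb_mbufs = 16384 requires zone_num = 18 memzones.
 */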
1114 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
1116 TESTPMD_LOG(ERR, "Cannot allocate memory for "
1117 "external buffer descriptors\n");
1121 for (ext_num = 0; ext_num < zone_num; ext_num++) {
1122 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
1123 const struct rte_memzone *mz;
1124 char mz_name[RTE_MEMZONE_NAMESIZE];
1127 ret = snprintf(mz_name, sizeof(mz_name),
1128 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
1129 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
1130 errno = ENAMETOOLONG;
1134 mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
1136 RTE_MEMZONE_IOVA_CONTIG |
1138 RTE_MEMZONE_SIZE_HINT_ONLY);
1141 * The caller exits on external buffer creation
1142 * error, so there is no need to free memzones.
1148 xseg->buf_ptr = mz->addr;
1149 xseg->buf_iova = mz->iova;
1150 xseg->buf_len = EXTBUF_ZONE_SIZE;
1151 xseg->elt_size = elt_size;
1153 if (ext_num == 0 && xmem != NULL) {
1162 * Configuration initialisation done once at init time.
1164 static struct rte_mempool *
1165 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
1166 unsigned int socket_id, uint16_t size_idx)
1168 char pool_name[RTE_MEMPOOL_NAMESIZE];
1169 struct rte_mempool *rte_mp = NULL;
1170 #ifndef RTE_EXEC_ENV_WINDOWS
1173 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1175 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1176 if (!is_proc_primary()) {
1177 rte_mp = rte_mempool_lookup(pool_name);
1179 rte_exit(EXIT_FAILURE,
1180 "Get mbuf pool for socket %u failed: %s\n",
1181 socket_id, rte_strerror(rte_errno));
1186 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1187 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1189 switch (mp_alloc_type) {
1190 case MP_ALLOC_NATIVE:
1192 /* wrapper to rte_mempool_create() */
1193 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1194 rte_mbuf_best_mempool_ops());
1195 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1196 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1199 #ifndef RTE_EXEC_ENV_WINDOWS
1202 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1203 mb_size, (unsigned int) mb_mempool_cache,
1204 sizeof(struct rte_pktmbuf_pool_private),
1205 socket_id, mempool_flags);
1209 if (rte_mempool_populate_anon(rte_mp) == 0) {
1210 rte_mempool_free(rte_mp);
1214 rte_pktmbuf_pool_init(rte_mp, NULL);
1215 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1216 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1220 case MP_ALLOC_XMEM_HUGE:
1223 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1225 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1226 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1229 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1230 if (heap_socket < 0)
1231 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1233 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1234 rte_mbuf_best_mempool_ops());
1235 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1236 mb_mempool_cache, 0, mbuf_seg_size,
1243 struct rte_pktmbuf_extmem *ext_mem;
1244 unsigned int ext_num;
1246 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1247 socket_id, pool_name, &ext_mem);
1249 rte_exit(EXIT_FAILURE,
1250 "Can't create pinned data buffers\n");
1252 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1253 rte_mbuf_best_mempool_ops());
1254 rte_mp = rte_pktmbuf_pool_create_extbuf
1255 (pool_name, nb_mbuf, mb_mempool_cache,
1256 0, mbuf_seg_size, socket_id,
1263 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1267 #ifndef RTE_EXEC_ENV_WINDOWS
1270 if (rte_mp == NULL) {
1271 rte_exit(EXIT_FAILURE,
1272 "Creation of mbuf pool for socket %u failed: %s\n",
1273 socket_id, rte_strerror(rte_errno));
1274 } else if (verbose_level > 0) {
1275 rte_mempool_dump(stdout, rte_mp);
1281 * Check whether the given socket id is valid in NUMA mode.
1282 * If valid, return 0; otherwise return -1.
1285 check_socket_id(const unsigned int socket_id)
1287 static int warning_once = 0;
1289 if (new_socket_id(socket_id)) {
1290 if (!warning_once && numa_support)
1292 "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
1300 * Get the allowed maximum number of RX queues.
1301 * *pid returns the port id which has the minimal value of
1302 * max_rx_queues among all ports.
1305 get_allowed_max_nb_rxq(portid_t *pid)
1307 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1308 bool max_rxq_valid = false;
1310 struct rte_eth_dev_info dev_info;
1312 RTE_ETH_FOREACH_DEV(pi) {
1313 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1316 max_rxq_valid = true;
1317 if (dev_info.max_rx_queues < allowed_max_rxq) {
1318 allowed_max_rxq = dev_info.max_rx_queues;
1322 return max_rxq_valid ? allowed_max_rxq : 0;
1326 * Check whether the input rxq is valid.
1327 * The input rxq is valid if it does not exceed the maximum number
1328 * of RX queues of every port.
1329 * If valid, return 0; otherwise return -1.
1332 check_nb_rxq(queueid_t rxq)
1334 queueid_t allowed_max_rxq;
1337 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1338 if (rxq > allowed_max_rxq) {
1340 "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
1341 rxq, allowed_max_rxq, pid);
1348 * Get the allowed maximum number of TX queues.
1349 * *pid returns the port id which has the minimal value of
1350 * max_tx_queues among all ports.
1353 get_allowed_max_nb_txq(portid_t *pid)
1355 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1356 bool max_txq_valid = false;
1358 struct rte_eth_dev_info dev_info;
1360 RTE_ETH_FOREACH_DEV(pi) {
1361 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1364 max_txq_valid = true;
1365 if (dev_info.max_tx_queues < allowed_max_txq) {
1366 allowed_max_txq = dev_info.max_tx_queues;
1370 return max_txq_valid ? allowed_max_txq : 0;
1374 * Check whether the input txq is valid.
1375 * The input txq is valid if it does not exceed the maximum number
1376 * of TX queues of every port.
1377 * If valid, return 0; otherwise return -1.
1380 check_nb_txq(queueid_t txq)
1382 queueid_t allowed_max_txq;
1385 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1386 if (txq > allowed_max_txq) {
1388 "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
1389 txq, allowed_max_txq, pid);
1396 * Get the allowed maximum number of RXDs of every rx queue.
1397 * *pid returns the port id which has the minimal value of
1398 * max_rxd among all queues of all ports.
1401 get_allowed_max_nb_rxd(portid_t *pid)
1403 uint16_t allowed_max_rxd = UINT16_MAX;
1405 struct rte_eth_dev_info dev_info;
1407 RTE_ETH_FOREACH_DEV(pi) {
1408 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1411 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1412 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1416 return allowed_max_rxd;
1420 * Get the allowed minimal number of RXDs of every rx queue.
1421 * *pid returns the port id which has the highest value of
1422 * min_rxd among all queues of all ports.
1425 get_allowed_min_nb_rxd(portid_t *pid)
1427 uint16_t allowed_min_rxd = 0;
1429 struct rte_eth_dev_info dev_info;
1431 RTE_ETH_FOREACH_DEV(pi) {
1432 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1435 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1436 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1441 return allowed_min_rxd;
1445 * Check whether the input rxd is valid.
1446 * The input rxd is valid if it does not exceed the maximum number
1447 * of RXDs of every Rx queue and is not less than the minimal
1448 * number of RXDs of every Rx queue.
1449 * If valid, return 0; otherwise return -1.
1452 check_nb_rxd(queueid_t rxd)
1454 uint16_t allowed_max_rxd;
1455 uint16_t allowed_min_rxd;
1458 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1459 if (rxd > allowed_max_rxd) {
1461 "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
1462 rxd, allowed_max_rxd, pid);
1466 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1467 if (rxd < allowed_min_rxd) {
1469 "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
1470 rxd, allowed_min_rxd, pid);
1478 * Get the allowed maximum number of TXDs of every TX queue.
1479 * *pid returns the port id which has the minimal value of
1480 * max_txd among all TX queues.
1483 get_allowed_max_nb_txd(portid_t *pid)
1485 uint16_t allowed_max_txd = UINT16_MAX;
1487 struct rte_eth_dev_info dev_info;
1489 RTE_ETH_FOREACH_DEV(pi) {
1490 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1493 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1494 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1498 return allowed_max_txd;
1502 * Get the allowed minimal number of TXDs of every TX queue.
1503 * *pid returns the port id which has the highest value of
1504 * min_txd among all TX queues.
1507 get_allowed_min_nb_txd(portid_t *pid)
1509 uint16_t allowed_min_txd = 0;
1511 struct rte_eth_dev_info dev_info;
1513 RTE_ETH_FOREACH_DEV(pi) {
1514 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1517 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1518 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1523 return allowed_min_txd;
1527 * Check whether the input txd is valid.
1528 * The input txd is valid if it does not exceed the maximum number
1529 * of TXDs of every TX queue and is not less than the minimal number.
1530 * If valid, return 0; otherwise return -1.
1533 check_nb_txd(queueid_t txd)
1535 uint16_t allowed_max_txd;
1536 uint16_t allowed_min_txd;
1539 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1540 if (txd > allowed_max_txd) {
1542 "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
1543 txd, allowed_max_txd, pid);
1547 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1548 if (txd < allowed_min_txd) {
1550 "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
1551 txd, allowed_min_txd, pid);
1559 * Get the allowed maximum number of hairpin queues.
1560 * *pid returns the port id which has the minimal value of
1561 * max_hairpin_queues among all ports.
1564 get_allowed_max_nb_hairpinq(portid_t *pid)
1566 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1568 struct rte_eth_hairpin_cap cap;
1570 RTE_ETH_FOREACH_DEV(pi) {
1571 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1575 if (cap.max_nb_queues < allowed_max_hairpinq) {
1576 allowed_max_hairpinq = cap.max_nb_queues;
1580 return allowed_max_hairpinq;
1584 * Check whether the input hairpinq is valid.
1585 * The input hairpinq is valid if it does not exceed the maximum number
1586 * of hairpin queues of every port.
1587 * If valid, return 0; otherwise return -1.
1590 check_nb_hairpinq(queueid_t hairpinq)
1592 queueid_t allowed_max_hairpinq;
1595 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1596 if (hairpinq > allowed_max_hairpinq) {
1598 "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
1599 hairpinq, allowed_max_hairpinq, pid);
1606 get_eth_overhead(struct rte_eth_dev_info *dev_info)
1608 uint32_t eth_overhead;
1610 if (dev_info->max_mtu != UINT16_MAX &&
1611 dev_info->max_rx_pktlen > dev_info->max_mtu)
1612 eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
1614 eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
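/* e.g. 14-byte Ethernet header + 4-byte CRC = 18 bytes of overhead */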
1616 return eth_overhead;
1620 init_config_port_offloads(portid_t pid, uint32_t socket_id)
1622 struct rte_port *port = &ports[pid];
1626 eth_rx_metadata_negotiate_mp(pid);
1628 port->dev_conf.txmode = tx_mode;
1629 port->dev_conf.rxmode = rx_mode;
1631 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1633 rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1635 if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
1636 port->dev_conf.txmode.offloads &=
1637 ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1639 /* Apply Rx offloads configuration */
1640 for (i = 0; i < port->dev_info.max_rx_queues; i++)
1641 port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
1642 /* Apply Tx offloads configuration */
1643 for (i = 0; i < port->dev_info.max_tx_queues; i++)
1644 port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
1647 port->dev_conf.link_speeds = eth_link_speed;
1650 port->dev_conf.rxmode.mtu = max_rx_pkt_len -
1651 get_eth_overhead(&port->dev_info);
1653 /* set flag to initialize port/queue */
1654 port->need_reconfig = 1;
1655 port->need_reconfig_queues = 1;
1656 port->socket_id = socket_id;
1657 port->tx_metadata = 0;
1660 * Check for maximum number of segments per MTU.
1661 * Accordingly update the mbuf data size.
1663 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1664 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1665 uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
1668 if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
1669 uint16_t data_size = (mtu + eth_overhead) /
1670 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1671 uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
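/*
 * Worked example with illustrative numbers only: an MTU of 9000 with an
 * 18-byte overhead and nb_mtu_seg_max = 5 gives data_size = 9018 / 5 =
 * 1803, so buffer_size = 1803 + 128 (a typical RTE_PKTMBUF_HEADROOM) = 1931.
 */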
1673 if (buffer_size > mbuf_data_size[0]) {
1674 mbuf_data_size[0] = buffer_size;
1675 TESTPMD_LOG(WARNING,
1676 "Configured mbuf size of the first segment %hu\n",
1687 struct rte_mempool *mbp;
1688 unsigned int nb_mbuf_per_pool;
1691 struct rte_gro_param gro_param;
1697 /* Configuration of logical cores. */
1698 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1699 sizeof(struct fwd_lcore *) * nb_lcores,
1700 RTE_CACHE_LINE_SIZE);
1701 if (fwd_lcores == NULL) {
1702 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1703 "failed\n", nb_lcores);
1705 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1706 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1707 sizeof(struct fwd_lcore),
1708 RTE_CACHE_LINE_SIZE);
1709 if (fwd_lcores[lc_id] == NULL) {
1710 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1713 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1716 RTE_ETH_FOREACH_DEV(pid) {
1720 socket_id = port_numa[pid];
1721 if (port_numa[pid] == NUMA_NO_CONFIG) {
1722 socket_id = rte_eth_dev_socket_id(pid);
1725 * if socket_id is invalid,
1726 * set to the first available socket.
1728 if (check_socket_id(socket_id) < 0)
1729 socket_id = socket_ids[0];
1732 socket_id = (socket_num == UMA_NO_CONFIG) ?
1735 /* Apply default TxRx configuration for all ports */
1736 init_config_port_offloads(pid, socket_id);
1739 * Create pools of mbuf.
1740 * If NUMA support is disabled, create a single pool of mbuf in
1741 * socket 0 memory by default.
1742 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1744 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1745 * nb_txd can be configured at run time.
1747 if (param_total_num_mbufs)
1748 nb_mbuf_per_pool = param_total_num_mbufs;
1750 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1751 (nb_lcores * mb_mempool_cache) +
1752 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1753 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
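/*
 * Illustrative sizing, assuming the testpmd default limits: 2048 Rx
 * descriptors + 8 lcores * 250 cached mbufs + 2048 Tx descriptors +
 * a 512-packet burst = 6608 mbufs, then scaled by RTE_MAX_ETHPORTS.
 */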
1759 for (i = 0; i < num_sockets; i++)
1760 for (j = 0; j < mbuf_data_size_n; j++)
1761 mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1762 mbuf_pool_create(mbuf_data_size[j],
1768 for (i = 0; i < mbuf_data_size_n; i++)
1769 mempools[i] = mbuf_pool_create
1772 socket_num == UMA_NO_CONFIG ?
1779 gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1780 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
1783 * Record which mbuf pool is used by each logical core, if needed.
1785 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1786 mbp = mbuf_pool_find(
1787 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1790 mbp = mbuf_pool_find(0, 0);
1791 fwd_lcores[lc_id]->mbp = mbp;
1793 /* initialize GSO context */
1794 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1795 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1796 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1797 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1799 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1806 /* create a gro context for each lcore */
1807 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1808 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1809 gro_param.max_item_per_flow = MAX_PKT_BURST;
1810 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1811 gro_param.socket_id = rte_lcore_to_socket_id(
1812 fwd_lcores_cpuids[lc_id]);
1813 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1814 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1815 rte_exit(EXIT_FAILURE,
1816 "rte_gro_ctx_create() failed\n");
1824 reconfig(portid_t new_port_id, unsigned socket_id)
1826 /* Reconfiguration of Ethernet ports. */
1827 init_config_port_offloads(new_port_id, socket_id);
1833 init_fwd_streams(void)
1836 struct rte_port *port;
1837 streamid_t sm_id, nb_fwd_streams_new;
1840 /* Set the socket id according to whether NUMA is used */
1841 RTE_ETH_FOREACH_DEV(pid) {
1843 if (nb_rxq > port->dev_info.max_rx_queues) {
1845 "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
1846 nb_rxq, port->dev_info.max_rx_queues);
1849 if (nb_txq > port->dev_info.max_tx_queues) {
1851 "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
1852 nb_txq, port->dev_info.max_tx_queues);
1856 if (port_numa[pid] != NUMA_NO_CONFIG)
1857 port->socket_id = port_numa[pid];
1859 port->socket_id = rte_eth_dev_socket_id(pid);
1862 * if socket_id is invalid,
1863 * set to the first available socket.
1865 if (check_socket_id(port->socket_id) < 0)
1866 port->socket_id = socket_ids[0];
1870 if (socket_num == UMA_NO_CONFIG)
1871 port->socket_id = 0;
1873 port->socket_id = socket_num;
1877 q = RTE_MAX(nb_rxq, nb_txq);
1880 "Fail: Cannot allocate fwd streams as number of queues is 0\n");
1883 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1884 if (nb_fwd_streams_new == nb_fwd_streams)
1887 if (fwd_streams != NULL) {
1888 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1889 if (fwd_streams[sm_id] == NULL)
1891 rte_free(fwd_streams[sm_id]);
1892 fwd_streams[sm_id] = NULL;
1894 rte_free(fwd_streams);
1899 nb_fwd_streams = nb_fwd_streams_new;
1900 if (nb_fwd_streams) {
1901 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1902 sizeof(struct fwd_stream *) * nb_fwd_streams,
1903 RTE_CACHE_LINE_SIZE);
1904 if (fwd_streams == NULL)
1905 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1906 " (struct fwd_stream *)) failed\n",
1909 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1910 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1911 " struct fwd_stream", sizeof(struct fwd_stream),
1912 RTE_CACHE_LINE_SIZE);
1913 if (fwd_streams[sm_id] == NULL)
1914 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1915 "(struct fwd_stream) failed\n");
1923 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1925 uint64_t total_burst, sburst;
1927 uint64_t burst_stats[4];
1928 uint16_t pktnb_stats[4];
1930 int burst_percent[4], sburstp;
1934 * First compute the total number of packet bursts and the
1935 * two highest numbers of bursts of the same number of packets.
1937 memset(&burst_stats, 0x0, sizeof(burst_stats));
1938 memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1940 /* Show stats for 0 burst size always */
1941 total_burst = pbs->pkt_burst_spread[0];
1942 burst_stats[0] = pbs->pkt_burst_spread[0];
1945 /* Find the next 2 burst sizes with highest occurrences. */
1946 for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
1947 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1952 total_burst += nb_burst;
1954 if (nb_burst > burst_stats[1]) {
1955 burst_stats[2] = burst_stats[1];
1956 pktnb_stats[2] = pktnb_stats[1];
1957 burst_stats[1] = nb_burst;
1958 pktnb_stats[1] = nb_pkt;
1959 } else if (nb_burst > burst_stats[2]) {
1960 burst_stats[2] = nb_burst;
1961 pktnb_stats[2] = nb_pkt;
1964 if (total_burst == 0)
1967 printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1968 for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1970 printf("%d%% of other]\n", 100 - sburstp);
1974 sburst += burst_stats[i];
1975 if (sburst == total_burst) {
1976 printf("%d%% of %d pkts]\n",
1977 100 - sburstp, (int) pktnb_stats[i]);
1982 (double)burst_stats[i] / total_burst * 100;
1983 printf("%d%% of %d pkts + ",
1984 burst_percent[i], (int) pktnb_stats[i]);
1985 sburstp += burst_percent[i];
1990 fwd_stream_stats_display(streamid_t stream_id)
1992 struct fwd_stream *fs;
1993 static const char *fwd_top_stats_border = "-------";
1995 fs = fwd_streams[stream_id];
1996 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1997 (fs->fwd_dropped == 0))
1999 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
2000 "TX Port=%2d/Queue=%2d %s\n",
2001 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
2002 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
2003 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
2004 " TX-dropped: %-14"PRIu64,
2005 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
2007 /* if checksum mode */
2008 if (cur_fwd_eng == &csum_fwd_engine) {
2009 printf(" RX- bad IP checksum: %-14"PRIu64
2010 " Rx- bad L4 checksum: %-14"PRIu64
2011 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
2012 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
2013 fs->rx_bad_outer_l4_csum);
2014 printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
2015 fs->rx_bad_outer_ip_csum);
2020 if (record_burst_stats) {
2021 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
2022 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
2027 fwd_stats_display(void)
2029 static const char *fwd_stats_border = "----------------------";
2030 static const char *acc_stats_border = "+++++++++++++++";
2032 struct fwd_stream *rx_stream;
2033 struct fwd_stream *tx_stream;
2034 uint64_t tx_dropped;
2035 uint64_t rx_bad_ip_csum;
2036 uint64_t rx_bad_l4_csum;
2037 uint64_t rx_bad_outer_l4_csum;
2038 uint64_t rx_bad_outer_ip_csum;
2039 } ports_stats[RTE_MAX_ETHPORTS];
2040 uint64_t total_rx_dropped = 0;
2041 uint64_t total_tx_dropped = 0;
2042 uint64_t total_rx_nombuf = 0;
2043 struct rte_eth_stats stats;
2044 uint64_t fwd_cycles = 0;
2045 uint64_t total_recv = 0;
2046 uint64_t total_xmit = 0;
2047 struct rte_port *port;
2053 memset(ports_stats, 0, sizeof(ports_stats));
2055 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2056 struct fwd_stream *fs = fwd_streams[sm_id];
2058 if (cur_fwd_config.nb_fwd_streams >
2059 cur_fwd_config.nb_fwd_ports) {
2060 fwd_stream_stats_display(sm_id);
2062 ports_stats[fs->tx_port].tx_stream = fs;
2063 ports_stats[fs->rx_port].rx_stream = fs;
2066 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
2068 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
2069 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
2070 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
2071 fs->rx_bad_outer_l4_csum;
2072 ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2073 fs->rx_bad_outer_ip_csum;
2075 if (record_core_cycles)
2076 fwd_cycles += fs->core_cycles;
2078 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2079 pt_id = fwd_ports_ids[i];
2080 port = &ports[pt_id];
2082 ret = rte_eth_stats_get(pt_id, &stats);
2085 "%s: Error: failed to get stats (port %u): %d",
2086 __func__, pt_id, ret);
2089 stats.ipackets -= port->stats.ipackets;
2090 stats.opackets -= port->stats.opackets;
2091 stats.ibytes -= port->stats.ibytes;
2092 stats.obytes -= port->stats.obytes;
2093 stats.imissed -= port->stats.imissed;
2094 stats.oerrors -= port->stats.oerrors;
2095 stats.rx_nombuf -= port->stats.rx_nombuf;
2097 total_recv += stats.ipackets;
2098 total_xmit += stats.opackets;
2099 total_rx_dropped += stats.imissed;
2100 total_tx_dropped += ports_stats[pt_id].tx_dropped;
2101 total_tx_dropped += stats.oerrors;
2102 total_rx_nombuf += stats.rx_nombuf;
2104 printf("\n %s Forward statistics for port %-2d %s\n",
2105 fwd_stats_border, pt_id, fwd_stats_border);
2107 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
2108 "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
2109 stats.ipackets + stats.imissed);
2111 if (cur_fwd_eng == &csum_fwd_engine) {
2112 printf(" Bad-ipcsum: %-14"PRIu64
2113 " Bad-l4csum: %-14"PRIu64
2114 "Bad-outer-l4csum: %-14"PRIu64"\n",
2115 ports_stats[pt_id].rx_bad_ip_csum,
2116 ports_stats[pt_id].rx_bad_l4_csum,
2117 ports_stats[pt_id].rx_bad_outer_l4_csum);
2118 printf(" Bad-outer-ipcsum: %-14"PRIu64"\n",
2119 ports_stats[pt_id].rx_bad_outer_ip_csum);
2121 if (stats.ierrors + stats.rx_nombuf > 0) {
2122 printf(" RX-error: %-"PRIu64"\n", stats.ierrors);
2123 printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
2126 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
2127 "TX-total: %-"PRIu64"\n",
2128 stats.opackets, ports_stats[pt_id].tx_dropped,
2129 stats.opackets + ports_stats[pt_id].tx_dropped);
2131 if (record_burst_stats) {
2132 if (ports_stats[pt_id].rx_stream)
2133 pkt_burst_stats_display("RX",
2134 &ports_stats[pt_id].rx_stream->rx_burst_stats);
2135 if (ports_stats[pt_id].tx_stream)
2136 pkt_burst_stats_display("TX",
2137 &ports_stats[pt_id].tx_stream->tx_burst_stats);
2140 printf(" %s--------------------------------%s\n",
2141 fwd_stats_border, fwd_stats_border);
2144 printf("\n %s Accumulated forward statistics for all ports"
2146 acc_stats_border, acc_stats_border);
2147 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
2149 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
2151 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
2152 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
2153 if (total_rx_nombuf > 0)
2154 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
2155 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
2157 acc_stats_border, acc_stats_border);
2158 if (record_core_cycles) {
2159 #define CYC_PER_MHZ 1E6
2160 if (total_recv > 0 || total_xmit > 0) {
2161 uint64_t total_pkts = 0;
2162 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
2163 strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
2164 total_pkts = total_xmit;
2166 total_pkts = total_recv;
2168 printf("\n CPU cycles/packet=%.2F (total cycles="
2169 "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
2171 (double) fwd_cycles / total_pkts,
2172 fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
2173 (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
2179 fwd_stats_reset(void)
2186 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2187 pt_id = fwd_ports_ids[i];
2188 ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2191 "%s: Error: failed to clear stats (port %u):%d",
2192 __func__, pt_id, ret);
2194 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2195 struct fwd_stream *fs = fwd_streams[sm_id];
2199 fs->fwd_dropped = 0;
2200 fs->rx_bad_ip_csum = 0;
2201 fs->rx_bad_l4_csum = 0;
2202 fs->rx_bad_outer_l4_csum = 0;
2203 fs->rx_bad_outer_ip_csum = 0;
2205 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
2206 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2207 fs->core_cycles = 0;
2212 flush_fwd_rx_queues(void)
2214 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2221 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2222 uint64_t timer_period;
2224 if (num_procs > 1) {
2225 printf("multi-process not support for flushing fwd Rx queues, skip the below lines and return.\n");
2229 /* convert to number of cycles */
2230 timer_period = rte_get_timer_hz(); /* 1 second timeout */
2232 for (j = 0; j < 2; j++) {
2233 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2234 for (rxq = 0; rxq < nb_rxq; rxq++) {
2235 port_id = fwd_ports_ids[rxp];
2237 * testpmd can get stuck in the do-while loop below
2238 * if rte_eth_rx_burst() always returns nonzero
2239 * packets. So a timer is added to exit this loop
2240 * after a 1-second timeout.
2242 prev_tsc = rte_rdtsc();
2244 nb_rx = rte_eth_rx_burst(port_id, rxq,
2245 pkts_burst, MAX_PKT_BURST);
2246 for (i = 0; i < nb_rx; i++)
2247 rte_pktmbuf_free(pkts_burst[i]);
2249 cur_tsc = rte_rdtsc();
2250 diff_tsc = cur_tsc - prev_tsc;
2251 timer_tsc += diff_tsc;
2252 } while ((nb_rx > 0) &&
2253 (timer_tsc < timer_period));
2257 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2262 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2264 struct fwd_stream **fsm;
2267 #ifdef RTE_LIB_BITRATESTATS
2268 uint64_t tics_per_1sec;
2269 uint64_t tics_datum;
2270 uint64_t tics_current;
2271 uint16_t i, cnt_ports;
2273 cnt_ports = nb_ports;
2274 tics_datum = rte_rdtsc();
2275 tics_per_1sec = rte_get_timer_hz();
2277 fsm = &fwd_streams[fc->stream_idx];
2278 nb_fs = fc->stream_nb;
2280 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2281 (*pkt_fwd)(fsm[sm_id]);
2282 #ifdef RTE_LIB_BITRATESTATS
2283 if (bitrate_enabled != 0 &&
2284 bitrate_lcore_id == rte_lcore_id()) {
2285 tics_current = rte_rdtsc();
2286 if (tics_current - tics_datum >= tics_per_1sec) {
2287 /* Periodic bitrate calculation */
2288 for (i = 0; i < cnt_ports; i++)
2289 rte_stats_bitrate_calc(bitrate_data,
2291 tics_datum = tics_current;
2295 #ifdef RTE_LIB_LATENCYSTATS
2296 if (latencystats_enabled != 0 &&
2297 latencystats_lcore_id == rte_lcore_id())
2298 rte_latencystats_update();
2301 } while (! fc->stopped);
2305 start_pkt_forward_on_core(void *fwd_arg)
2307 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2308 cur_fwd_config.fwd_eng->packet_fwd);
2313 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2314 * Used to start communication flows in network loopback test configurations.
2317 run_one_txonly_burst_on_core(void *fwd_arg)
2319 struct fwd_lcore *fwd_lc;
2320 struct fwd_lcore tmp_lcore;
2322 fwd_lc = (struct fwd_lcore *) fwd_arg;
2323 tmp_lcore = *fwd_lc;
2324 tmp_lcore.stopped = 1;
2325 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2330 * Launch packet forwarding:
2331 * - Setup per-port forwarding context.
2332 * - launch logical cores with their forwarding configuration.
2335 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2341 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2342 lc_id = fwd_lcores_cpuids[i];
2343 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2344 fwd_lcores[i]->stopped = 0;
2345 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2346 fwd_lcores[i], lc_id);
2349 "launch lcore %u failed - diag=%d\n",
2356 * Launch packet forwarding configuration.
2359 start_packet_forwarding(int with_tx_first)
2361 port_fwd_begin_t port_fwd_begin;
2362 port_fwd_end_t port_fwd_end;
2365 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2366 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
2368 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2369 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
2371 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2372 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2373 (!nb_rxq || !nb_txq))
2374 rte_exit(EXIT_FAILURE,
2375 "Either rxq or txq are 0, cannot use %s fwd mode\n",
2376 cur_fwd_eng->fwd_mode_name);
2378 if (all_ports_started() == 0) {
2379 fprintf(stderr, "Not all ports were started\n");
2382 if (test_done == 0) {
2383 fprintf(stderr, "Packet forwarding already started\n");
2389 pkt_fwd_config_display(&cur_fwd_config);
2390 if (!pkt_fwd_shared_rxq_check())
2393 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2394 if (port_fwd_begin != NULL) {
2395 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2396 if (port_fwd_begin(fwd_ports_ids[i])) {
2398 "Packet forwarding is not ready\n");
2404 if (with_tx_first) {
2405 port_fwd_begin = tx_only_engine.port_fwd_begin;
2406 if (port_fwd_begin != NULL) {
2407 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2408 if (port_fwd_begin(fwd_ports_ids[i])) {
2410 "Packet forwarding is not ready\n");
2420 flush_fwd_rx_queues();
2422 rxtx_config_display();
2425 if (with_tx_first) {
2426 while (with_tx_first--) {
2427 launch_packet_forwarding(
2428 run_one_txonly_burst_on_core);
2429 rte_eal_mp_wait_lcore();
2431 port_fwd_end = tx_only_engine.port_fwd_end;
2432 if (port_fwd_end != NULL) {
2433 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2434 (*port_fwd_end)(fwd_ports_ids[i]);
2437 launch_packet_forwarding(start_pkt_forward_on_core);
void
stop_packet_forwarding(void)
{
	port_fwd_end_t port_fwd_end;
	lcoreid_t lc_id;
	portid_t pt_id;
	int i;

	if (test_done) {
		fprintf(stderr, "Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}

	fwd_stats_display();

	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		fprintf(stderr, "\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		fprintf(stderr, "\nSet link down failed.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* All non-slave ports are started */
	return 1;
}
int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}
int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}
/* Configure the Rx and Tx hairpin queues for the selected port. */
static int
setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];
	uint16_t peer_rx_port = pi;
	uint16_t peer_tx_port = pi;
	uint32_t manual = 1;
	uint32_t tx_exp = hairpin_mode & 0x10;
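	/*
	 * hairpin_mode bits, as documented for testpmd's --hairpin-mode
	 * option:
	 *   0x01 - hairpin ports loop (one port's Tx to the next port's Rx)
	 *   0x02 - hairpin ports paired
	 *   0x10 - explicit Tx flow rule
	 * The default (0) keeps both hairpin queues on the same port.
	 */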
	if (!(hairpin_mode & 0xf)) {
		peer_rx_port = pi;
		peer_tx_port = pi;
		manual = 0;
	} else if (hairpin_mode & 0x1) {
		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
						RTE_ETH_DEV_NO_OWNER);
		if (peer_tx_port >= RTE_MAX_ETHPORTS)
			peer_tx_port = rte_eth_find_next_owned_by(0,
						RTE_ETH_DEV_NO_OWNER);
		if (p_pi != RTE_MAX_ETHPORTS) {
			peer_rx_port = p_pi;
		} else {
			uint16_t next_pi;

			/* Last port will be the peer Rx port of the first. */
			RTE_ETH_FOREACH_DEV(next_pi)
				peer_rx_port = next_pi;
		}
		manual = 1;
	} else if (hairpin_mode & 0x2) {
		if (cnt_pi & 0x1) {
			peer_rx_port = p_pi;
		} else {
			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
						RTE_ETH_DEV_NO_OWNER);
			if (peer_rx_port >= RTE_MAX_ETHPORTS)
				peer_rx_port = pi;
		}
		peer_tx_port = peer_rx_port;
		manual = 1;
	}
	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = peer_rx_port;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up a Tx hairpin queue; return */
		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
		else
			fprintf(stderr,
				"Port %d cannot be set back to stopped\n", pi);
		fprintf(stderr, "Failed to configure port %d hairpin queues\n",
			pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = peer_tx_port;
		hairpin_conf.peers[0].queue = i + nb_txq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up an Rx hairpin queue; return */
		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
		else
			fprintf(stderr,
				"Port %d cannot be set back to stopped\n", pi);
		fprintf(stderr, "Failed to configure port %d hairpin queues\n",
			pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
/* Configure the Rx with optional split. */
int
rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
	       uint16_t nb_rx_desc, unsigned int socket_id,
	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
{
	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
	unsigned int i, mp_n;
	int ret;

	if (rx_pkt_nb_segs <= 1 ||
	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
		rx_conf->rx_seg = NULL;
		rx_conf->rx_nseg = 0;
		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
					     nb_rx_desc, socket_id,
					     rx_conf, mp);
		return ret;
	}
	for (i = 0; i < rx_pkt_nb_segs; i++) {
		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
		struct rte_mempool *mpx;
		/*
		 * Use the last valid pool for any segment whose index
		 * exceeds the number of configured pools.
		 */
		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
		mpx = mbuf_pool_find(socket_id, mp_n);
		/* Handle zero as mbuf data buffer size. */
		rx_seg->length = rx_pkt_seg_lengths[i] ?
				   rx_pkt_seg_lengths[i] :
				   mbuf_data_size[mp_n];
		rx_seg->offset = i < rx_pkt_nb_offs ?
				   rx_pkt_seg_offsets[i] : 0;
		rx_seg->mp = mpx ? mpx : mp;
	}
	rx_conf->rx_nseg = rx_pkt_nb_segs;
	rx_conf->rx_seg = rx_useg;
	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
				     socket_id, rx_conf, NULL);
	rx_conf->rx_seg = NULL;
	rx_conf->rx_nseg = 0;
	return ret;
}
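/*
 * Illustrative sketch (not compiled): a two-segment buffer split, e.g.
 * a 128-byte header part and the remainder of the packet in a second
 * pool, would be described to the PMD as:
 *
 *	struct rte_eth_rxseg_split *s0 = &rx_useg[0].split;
 *	struct rte_eth_rxseg_split *s1 = &rx_useg[1].split;
 *	s0->length = 128; s0->offset = 0; s0->mp = hdr_pool;
 *	s1->length = 0;   s1->offset = 0; s1->mp = data_pool;
 *	rx_conf->rx_nseg = 2;
 *	rx_conf->rx_seg = rx_useg;
 *
 * hdr_pool/data_pool are hypothetical mempools; a zero length falls
 * back to the pool's mbuf data size, as handled in the loop above.
 */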
static int
alloc_xstats_display_info(portid_t pi)
{
	uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
	uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
	uint64_t **curr_values = &ports[pi].xstats_info.curr_values;

	if (xstats_display_num == 0)
		return 0;

	*ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
	if (*ids_supp == NULL)
		goto fail_ids_supp;

	*prev_values = calloc(xstats_display_num,
			      sizeof(**prev_values));
	if (*prev_values == NULL)
		goto fail_prev_values;

	*curr_values = calloc(xstats_display_num,
			      sizeof(**curr_values));
	if (*curr_values == NULL)
		goto fail_curr_values;

	ports[pi].xstats_info.allocated = true;

	return 0;

fail_curr_values:
	free(*prev_values);
fail_prev_values:
	free(*ids_supp);
fail_ids_supp:
	return -ENOMEM;
}
static void
free_xstats_display_info(portid_t pi)
{
	if (!ports[pi].xstats_info.allocated)
		return;
	free(ports[pi].xstats_info.ids_supp);
	free(ports[pi].xstats_info.prev_values);
	free(ports[pi].xstats_info.curr_values);
	ports[pi].xstats_info.allocated = false;
}
/** Fill helper structures for the specified port to show extended statistics. */
static void
fill_xstats_display_info_for_port(portid_t pi)
{
	unsigned int stat, stat_supp;
	const char *xstat_name;
	struct rte_port *port;
	uint64_t *ids_supp;
	int rc;

	if (xstats_display_num == 0)
		return;

	if (pi == (portid_t)RTE_PORT_ALL) {
		fill_xstats_display_info();
		return;
	}

	port = &ports[pi];
	if (port->port_status != RTE_PORT_STARTED)
		return;

	if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
		rte_exit(EXIT_FAILURE,
			 "Failed to allocate xstats display memory\n");

	ids_supp = port->xstats_info.ids_supp;
	for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
		xstat_name = xstats_display[stat].name;
		rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
						   ids_supp + stat_supp);
		if (rc != 0) {
			fprintf(stderr,
				"No xstat '%s' on port %u - skipping (index %u)\n",
				xstat_name, pi, stat);
			continue;
		}
		stat_supp++;
	}

	port->xstats_info.ids_supp_sz = stat_supp;
}
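/*
 * Note: ids_supp is left holding only the xstat ids the port actually
 * supports, compacted to the front, with ids_supp_sz as the count; the
 * display code can then feed them to id-based queries such as
 * rte_eth_xstats_get_by_id().
 */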
/** Fill helper structures for all ports to show extended statistics. */
static void
fill_xstats_display_info(void)
{
	portid_t pi;

	if (xstats_display_num == 0)
		return;

	RTE_ETH_FOREACH_DEV(pi)
		fill_xstats_display_info_for_port(pi);
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	portid_t p_pi = RTE_MAX_ETHPORTS;
	portid_t pl[RTE_MAX_ETHPORTS];
	portid_t peer_pl[RTE_MAX_ETHPORTS];
	uint16_t cnt_pi = 0;
	uint16_t cfg_pi = 0;
	int peer_pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_eth_hairpin_cap cap;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		need_check_link_status = 0;
		port = &ports[pi];
		if (port->port_status == RTE_PORT_STOPPED)
			port->port_status = RTE_PORT_HANDLING;
		else {
			fprintf(stderr, "Port %d is not stopped; skipping\n", pi);
			continue;
		}
		if (port->need_reconfig > 0) {
			struct rte_eth_conf dev_conf;
			int k;

			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					fprintf(stderr,
						"Failed to apply isolated mode on port %d\n",
						pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
			       port->socket_id);
			if (nb_hairpinq > 0 &&
			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
				fprintf(stderr,
					"Port %d doesn't support hairpin queues\n",
					pi);
				return -1;
			}

			/* configure port */
			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
						    nb_txq + nb_hairpinq,
						    &(port->dev_conf));
			if (diag != 0) {
				if (port->port_status == RTE_PORT_HANDLING)
					port->port_status = RTE_PORT_STOPPED;
				else
					fprintf(stderr,
						"Port %d cannot be set back to stopped\n",
						pi);
				fprintf(stderr, "Failed to configure port %d\n",
					pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
			/* get device configuration */
			if (0 !=
			    eth_dev_conf_get_print_err(pi, &dev_conf)) {
				fprintf(stderr,
					"port %d cannot get device configuration\n",
					pi);
				return -1;
			}
			/* Apply Rx offloads configuration */
			if (dev_conf.rxmode.offloads !=
			    port->dev_conf.rxmode.offloads) {
				port->dev_conf.rxmode.offloads |=
					dev_conf.rxmode.offloads;
				for (k = 0;
				     k < port->dev_info.max_rx_queues;
				     k++)
					port->rx_conf[k].offloads |=
						dev_conf.rxmode.offloads;
			}
			/* Apply Tx offloads configuration */
			if (dev_conf.txmode.offloads !=
			    port->dev_conf.txmode.offloads) {
				port->dev_conf.txmode.offloads |=
					dev_conf.txmode.offloads;
				for (k = 0;
				     k < port->dev_info.max_tx_queues;
				     k++)
					port->tx_conf[k].offloads |=
						dev_conf.txmode.offloads;
			}
		}
		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Failed to set up a Tx queue; return */
				if (port->port_status == RTE_PORT_HANDLING)
					port->port_status = RTE_PORT_STOPPED;
				else
					fprintf(stderr,
						"Port %d cannot be set back to stopped\n",
						pi);
				fprintf(stderr,
					"Failed to configure port %d Tx queues\n",
					pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find
							(rxring_numa[pi], 0);
					if (mp == NULL) {
						fprintf(stderr,
							"Failed to set up Rx queue: no mempool allocated on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find
							(port->socket_id, 0);
					if (mp == NULL) {
						fprintf(stderr,
							"Failed to set up Rx queue: no mempool allocated on socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Failed to set up an Rx queue; return */
				if (port->port_status == RTE_PORT_HANDLING)
					port->port_status = RTE_PORT_STOPPED;
				else
					fprintf(stderr,
						"Port %d cannot be set back to stopped\n",
						pi);
				fprintf(stderr,
					"Failed to configure port %d Rx queues\n",
					pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup hairpin queues */
			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
				return -1;
		}
		configure_rxtx_dump_callbacks(verbose_level);
		if (clear_ptypes) {
			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
					NULL, 0);
			if (diag < 0)
				fprintf(stderr,
					"Port %d: Failed to disable Ptype parsing\n",
					pi);
		}
		p_pi = pi;
		cnt_pi++;

		/* start port */
		diag = eth_dev_start_mp(pi);
		if (diag < 0) {
			fprintf(stderr, "Failed to start port %d: %s\n",
				pi, rte_strerror(-diag));

			/* Failed to start the port; set it back to stopped */
			if (port->port_status == RTE_PORT_HANDLING)
				port->port_status = RTE_PORT_STOPPED;
			else
				fprintf(stderr,
					"Port %d cannot be set back to stopped\n",
					pi);
			continue;
		}

		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STARTED;
		else
			fprintf(stderr, "Port %d cannot be set to started\n",
				pi);

		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
			       RTE_ETHER_ADDR_BYTES(&port->eth_addr));

		/* at least one port started; link status needs checking */
		need_check_link_status = 1;

		pl[cfg_pi++] = pi;
	}
	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		fprintf(stderr, "Please stop the ports first\n");

	if (hairpin_mode & 0xf) {
		uint16_t i;
		int j;

		/* bind all started hairpin ports */
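		/*
		 * rte_eth_hairpin_bind(tx_port, rx_port) binds the Tx
		 * direction only, so each pair is bound twice below:
		 * this port's Tx to every peer's Rx, then every peer's
		 * Tx back to this port's Rx.
		 */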
		for (i = 0; i < cfg_pi; i++) {
			pi = pl[i];
			/* bind current Tx to all peer Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 1);
			if (peer_pi < 0)
				return peer_pi;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
				if (diag < 0) {
					fprintf(stderr,
						"Error during binding hairpin Tx port %u to %u: %s\n",
						pi, peer_pl[j],
						rte_strerror(-diag));
					return -1;
				}
			}
			/* bind all peer Tx to current Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 0);
			if (peer_pi < 0)
				return peer_pi;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
				if (diag < 0) {
					fprintf(stderr,
						"Error during binding hairpin Tx port %u to %u: %s\n",
						peer_pl[j], pi,
						rte_strerror(-diag));
					return -1;
				}
			}
		}
	}

	fill_xstats_display_info_for_port(pid);

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;
	portid_t peer_pl[RTE_MAX_ETHPORTS];
	int peer_pi;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		port = &ports[pi];
		if (port->port_status == RTE_PORT_STARTED)
			port->port_status = RTE_PORT_HANDLING;
		else
			continue;
		if (hairpin_mode & 0xf) {
			int j;

			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
			/* unbind all peer Tx from current Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 0);
			if (peer_pi < 0)
				continue;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				rte_eth_hairpin_unbind(peer_pl[j], pi);
			}
		}

		if (port->flow_list)
			port_flow_flush(pi);

		if (eth_dev_stop_mp(pi) != 0)
			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
				pi);

		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
		else
			fprintf(stderr, "Port %d cannot be set to stopped\n",
				pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
static void
remove_invalid_ports_in(portid_t *array, portid_t *total)
{
	portid_t i;
	portid_t new_total = 0;

	for (i = 0; i < *total; i++)
		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
			array[new_total] = array[i];
			new_total++;
		}
	*total = new_total;
}

static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		port = &ports[pi];
		if (port->port_status == RTE_PORT_CLOSED) {
			fprintf(stderr, "Port %d is already closed\n", pi);
			continue;
		}

		if (is_proc_primary()) {
			port_flow_flush(pi);
			port_flex_item_flush(pi);
			port_action_handle_flush(pi);
			rte_eth_dev_close(pi);
		}

		free_xstats_display_info(pi);
	}

	remove_invalid_ports();
	printf("Done\n");
}
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
		fprintf(stderr,
			"Cannot reset port(s), please stop port(s) first.\n");
		return;
	}

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			fprintf(stderr, "Failed to reset port %d. diag=%d\n",
				pi, diag);
		}
	}

	printf("Done\n");
}
void
attach_port(char *identifier)
{
	portid_t pi;
	struct rte_dev_iterator iterator;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		fprintf(stderr, "Invalid parameter specified\n");
		return;
	}

	if (rte_dev_probe(identifier) < 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
		return;
	}

	/* first attach mode: event */
	if (setup_on_probe_event) {
		/* new ports are detected on RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
					ports[pi].need_setup != 0)
				setup_attached_port(pi);
		return;
	}

	/* second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* setup ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
	}
}
static void
setup_attached_port(portid_t pi)
{
	unsigned int socket_id;
	int ret;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	ret = rte_eth_promiscuous_enable(pi);
	if (ret != 0)
		fprintf(stderr,
			"Error during enabling promiscuous mode for port %u: %s - ignoring\n",
			pi, rte_strerror(-ret));

	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Total ports: %d\n", pi, nb_ports);
	printf("Done\n");
}
static void
detach_device(struct rte_device *dev)
{
	portid_t sibling;

	if (dev == NULL) {
		fprintf(stderr, "Device already removed\n");
		return;
	}

	printf("Removing a device...\n");

	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
				fprintf(stderr, "Port %u not stopped\n",
					sibling);
				return;
			}
			port_flow_flush(sibling);
		}
	}

	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	remove_invalid_ports();

	printf("Device is detached\n");
	printf("Total ports: %d\n", nb_ports);
	printf("Done\n");
}
void
detach_port_device(portid_t port_id)
{
	int ret;
	struct rte_eth_dev_info dev_info;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			fprintf(stderr, "Port not stopped\n");
			return;
		}
		fprintf(stderr, "Port was not closed\n");
	}

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0) {
		TESTPMD_LOG(ERR,
			"Failed to get device info for port %d, not detaching\n",
			port_id);
		return;
	}
	detach_device(dev_info.device);
}
void
detach_devargs(char *identifier)
{
	struct rte_dev_iterator iterator;
	struct rte_devargs da;
	portid_t port_id;

	printf("Removing a device...\n");

	memset(&da, 0, sizeof(da));
	if (rte_devargs_parsef(&da, "%s", identifier)) {
		fprintf(stderr, "Cannot parse identifier\n");
		return;
	}

	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
				fprintf(stderr, "Port %u not stopped\n",
					port_id);
				rte_eth_iterator_cleanup(&iterator);
				rte_devargs_reset(&da);
				return;
			}
			port_flow_flush(port_id);
		}
	}

	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
			    da.name, da.bus->name);
		rte_devargs_reset(&da);
		return;
	}

	remove_invalid_ports();

	printf("Device %s is detached\n", identifier);
	printf("Total ports: %d\n", nb_ports);
	printf("Done\n");
	rte_devargs_reset(&da);
}
void
pmd_test_exit(void)
{
	portid_t pt_id;
	unsigned int i;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

#ifndef RTE_EXEC_ENV_WINDOWS
	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
		if (mempools[i] != NULL &&
		    mp_alloc_type == MP_ALLOC_ANON)
			rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
					     NULL);
	}
#endif
	if (ports != NULL) {
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			close_port(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret)
			RTE_LOG(ERR, EAL,
				"failed to stop device event monitor.\n");

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0)
			RTE_LOG(ERR, EAL,
				"failed to unregister device event callback.\n");

		ret = rte_dev_hotplug_handle_disable();
		if (ret)
			RTE_LOG(ERR, EAL,
				"failed to disable hotplug handling.\n");
	}
	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
		if (mempools[i] != NULL)
			mempool_free_mp(mempools[i]);
	}
	free(xstats_display);

	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);

struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};
/* Check the link status of all ports within up to 9 s, then print the final status */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					fprintf(stderr,
						"Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status,
					sizeof(link_status), &link);
				printf("Port %d %s\n", portid, link_status);
				continue;
			}
			/* clear all_ports_up flag if any link is down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports are up or the timeout expired */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		TESTPMD_LOG(ERR,
			"Failed to get device info for port %d, not detaching\n",
			port_id);
	else {
		struct rte_device *device = dev_info.device;
		close_port(port_id);
		detach_device(device); /* might already be removed or have more ports */
	}
	if (need_to_start)
		start_packet_forwarding(0);
}
/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		   void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr,
			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr,
				"Could not set up deferred device removal\n");
		break;
	case RTE_ETH_EVENT_DESTROY:
		ports[port_id].port_status = RTE_PORT_CLOSED;
		printf("Port %u is closed\n", port_id);
		break;
	default:
		break;
	}
	return 0;
}
static int
register_eth_event_callback(void)
{
	int ret;
	enum rte_eth_event_type event;

	for (event = RTE_ETH_EVENT_UNKNOWN;
			event < RTE_ETH_EVENT_MAX; event++) {
		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
				event,
				eth_event_callback,
				NULL);
		if (ret != 0) {
			TESTPMD_LOG(ERR, "Failed to register callback for "
					"%s event\n", eth_event_desc[event]);
			return -1;
		}
	}

	return 0;
}
/* This function is used by the interrupt thread */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
		   __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "cannot get port for device %s!\n",
				device_name);
			return;
		}
		/*
		 * The user's callback is invoked from within the EAL
		 * interrupt callback, and an interrupt callback cannot be
		 * unregistered while it is still executing. Therefore
		 * return from this callback quickly and detach the device
		 * via a deferred removal instead. This is a workaround:
		 * once device detaching is moved into the EAL, the
		 * deferred removal can be replaced by a direct detach.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: attach the port once kernel driver binding completes. */
		break;
	default:
		break;
	}
}
static void
rxtx_port_config(portid_t pid)
{
	uint16_t qid;
	uint64_t offloads;
	struct rte_port *port = &ports[pid];

	for (qid = 0; qid < nb_rxq; qid++) {
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;

		if (rxq_share > 0 &&
		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
			/* Non-zero share group to enable RxQ share. */
			port->rx_conf[qid].share_group = pid / rxq_share + 1;
			port->rx_conf[qid].share_qid = qid; /* Equal mapping. */
		}

		if (offloads != 0)
			port->rx_conf[qid].offloads = offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}
	for (qid = 0; qid < nb_txq; qid++) {
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		if (offloads != 0)
			port->tx_conf[qid].offloads = offloads;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}
/*
 * Helper function to set the MTU from the frame size.
 *
 * port->dev_info should be set before calling this function.
 *
 * Returns 0 on success, negative on error.
 */
static int
update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
{
	struct rte_port *port = &ports[portid];
	uint32_t eth_overhead;
	uint16_t mtu, new_mtu;

	eth_overhead = get_eth_overhead(&port->dev_info);

	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
		printf("Failed to get MTU for port %u\n", portid);
		return -1;
	}

	new_mtu = max_rx_pktlen - eth_overhead;

	if (mtu == new_mtu)
		return 0;

	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
		fprintf(stderr,
			"Failed to set MTU to %u for port %u\n",
			new_mtu, portid);
		return -1;
	}

	port->dev_conf.rxmode.mtu = new_mtu;

	return 0;
}
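/*
 * Worked example: with a plain L2 frame the overhead is
 * RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes, so
 * max_rx_pktlen = 1518 yields new_mtu = 1518 - 18 = 1500. Devices
 * reporting extra overhead (e.g. for VLAN tags) in their dev_info
 * make get_eth_overhead() return correspondingly more.
 */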
static void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	int ret, i;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			return;

		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
				port->dev_conf.rxmode.mq_mode =
					(enum rte_eth_rx_mq_mode)
						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
			} else {
				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
				port->dev_conf.rxmode.offloads &=
						~RTE_ETH_RX_OFFLOAD_RSS_HASH;

				for (i = 0;
				     i < port->dev_info.nb_rx_queues;
				     i++)
					port->rx_conf[i].offloads &=
						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
			}
		}

		rxtx_port_config(pid);

		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		if (ret != 0)
			return;

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;
	struct rte_eth_dev_info dev_info;
	int ret;

	port = &ports[slave_pid];
	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
	if (ret != 0) {
		TESTPMD_LOG(ERR,
			"Failed to get device info for port id %d, "
			"cannot determine if the port is a bonded slave\n",
			slave_pid);
		return 0;
	}
	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
		return 1;
	return 0;
}
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
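/*
 * 32 tags are enough for one VLAN filter entry per pool in the largest
 * VMDq configuration used below (RTE_ETH_32_POOLS when num_tcs is
 * RTE_ETH_4_TCS).
 */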
static int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Build up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}
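		/*
		 * Worked example: with num_tcs == RTE_ETH_4_TCS there are
		 * 32 pools, so VLAN tag i selects pool bit (1 << i) and
		 * user priority p is served by traffic class p % 4.
		 */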
		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;

	return 0;
}
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	if (num_procs > 1) {
		printf("The multi-process feature doesn't support DCB.\n");
		return -ENOTSUP;
	}
	rte_port = &ports[pid];

	/* retain the original device configuration. */
	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));

	/* set configuration of DCB in VT mode and DCB in non-VT mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	/* remove RSS HASH offload for DCB in VT mode */
	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
		for (i = 0; i < nb_rxq; i++)
			rte_port->rx_conf[i].offloads &=
				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
	}

	/* re-configure the device. */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
	if (retval < 0)
		return retval;

	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
	if (retval != 0)
		return retval;

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue ids of the VMDq pools start after the PF queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		fprintf(stderr,
			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
			pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same DCB capability
	 * and the same number of rxq and txq in DCB mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if VT is disabled, use all PF queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(pid);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
	if (retval != 0)
		return retval;

	rte_port->dcb_flag = 1;

	/* Enter DCB configuration status */
	dcb_config = 1;

	return 0;
}
static void
init_port(void)
{
	int i;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		ports[i].xstats_info.allocated = false;
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		LIST_INIT(&ports[i].flow_tunnel_list);
	/* Initialize the ports' NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}
static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);

	fflush(stdout);
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
			signum);
#ifdef RTE_LIB_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
#ifndef RTE_EXEC_ENV_WINDOWS
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
#endif
	}
}
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
#ifdef RTE_LIB_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");
	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIB_BITRATESTATS
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIB_LATENCYSTATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);
#ifndef RTE_EXEC_ENV_WINDOWS
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}
#endif

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		fprintf(stderr,
			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		fprintf(stderr,
			"Warning: either Rx or Tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		fprintf(stderr,
			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d prevents fully testing it.\n",
			nb_rxq, nb_txq);
	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to enable hotplug handling.");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to start device event monitoring.");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to register device event callback\n");
			return -1;
		}
	}
	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			fprintf(stderr,
				"Error during enabling promiscuous mode for port %u: %s - ignoring\n",
				port_id, rte_strerror(-ret));
	}
#ifdef RTE_LIB_METRICS
	/* Init metrics library */
	rte_metrics_init(rte_socket_id());
#endif

#ifdef RTE_LIB_LATENCYSTATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			fprintf(stderr,
				"Warning: latencystats init() returned error %d\n",
				ret);
		fprintf(stderr, "Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIB_BITRATESTATS
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				 "Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif
#ifdef RTE_LIB_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				rte_delay_us_sleep(US_PER_S);
			}
		}
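		/*
		 * Example: with --stats-period 5, timer_period above is
		 * 5 * rte_get_timer_hz() cycles, so print_stats() runs
		 * roughly every 5 seconds while the loop sleeps one
		 * second (US_PER_S microseconds) between checks.
		 */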
4386 printf("Press enter to exit\n");
4387 rc = read(0, &c, 1);
	ret = rte_eal_cleanup();
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			 "EAL cleanup failed: %s\n", strerror(-ret));

	return EXIT_SUCCESS;
}