/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_pmd_ixgbe.h>
#include <rte_pdump.h>
#include <rte_metrics.h>
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use main core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not set.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
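/*
 * Illustrative usage (a sketch, assuming the usual testpmd option parsing):
 * passing "--mp-alloc=xmem" on the command line selects MP_ALLOC_XMEM here.
 */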
/*
 * Sockets on which the memory pools used by the ports are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Sockets on which the RX rings used by the ports are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Sockets on which the TX rings used by the ports are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                       * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * A process running with the 'stats-period' option inside a container
 * cannot be terminated from the outside. Set a flag to exit the stats
 * period loop after SIGINT/SIGTERM is received.
 */

/*
 * Configuration of packet segments used to scatter received packets
 * if some of the split features are configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Current configuration is in DCB or not, 0 means it is not in DCB mode. */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint16_t hairpin_mode;
/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
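/*
 * Usage sketch (assumed, mirroring how the event callback is expected to
 * filter events): a set bit means the matching event type gets printed,
 * e.g. "if (event_print_mask & (UINT32_C(1) << type))" guards the output.
 */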
/*
 * Decide if all memory is locked for performance.
 */

/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats is enabled in the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		/**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

/*
 * Measure of CPU cycles disabled by default
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default
 */
uint8_t record_burst_stats;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/*
 * Hexadecimal bitmask of the RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
/*
 * Helper function to check whether a socket has not been discovered yet.
 * If it is new, return a positive value. If not, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}
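/*
 * Worked example (assuming RTE_ETHER_LOCAL_ADMIN_ADDR == 0x02): the default
 * peer address of port 3 is 02:00:00:00:00:03.
 */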
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
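/*
 * Illustrative sizing (assumed numbers, not computed by this file): with a
 * 2 MB page and obj_sz of about 2688 bytes, one page holds 780 objects, so
 * 8192 mbufs need 11 pages (22 MB); adding the 128 MB header estimate gives
 * total_mem = 150 MB, which is already 2 MB-aligned.
 */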
static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
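/*
 * Worked example of the encoding above (a sketch): for 2 MB pages,
 * rte_log2_u64(2 * 1024 * 1024) == 21, so the function returns 21 << 26,
 * matching the kernel's MAP_HUGE_2MB value.
 */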
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);
		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}
		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	if (addr != NULL)
		munmap(addr, mem_sz);

	return -1;
}
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
					memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
				      memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
}
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
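	/*
	 * Sizing sketch (illustrative numbers only): with mbuf_sz = 2176 the
	 * cache-line alignment keeps elt_size at 2176, each 2 MB zone then
	 * holds 2097152 / 2176 = 963 elements, and 8192 mbufs need
	 * (8192 + 962) / 963 = 9 memzones.
	 */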
	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
/*
 * Check whether a given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id with the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return max_rxq_valid ? allowed_max_rxq : 0;
}
/*
 * Check whether the input rxq is valid: it must not be greater than the
 * maximum number of RX queues of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq, allowed_max_rxq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id with the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return max_txq_valid ? allowed_max_txq : 0;
}
/*
 * Check whether the input txq is valid: it must not be greater than the
 * maximum number of TX queues of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq, allowed_max_txq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RXDs of every RX queue.
 * *pid returns the port id with the minimal value of
 * max_rxd among all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_rxd(portid_t *pid)
{
	uint16_t allowed_max_rxd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_rxd;
}
/*
 * Get the allowed minimal number of RXDs of every RX queue.
 * *pid returns the port id with the highest value of
 * min_rxd among all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_rxd(portid_t *pid)
{
	uint16_t allowed_min_rxd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_rxd;
}
/*
 * Check whether the input rxd is valid: it must not be greater than any
 * RX queue's maximum number of RXDs, nor less than any RX queue's minimal
 * number of RXDs. Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxd(queueid_t rxd)
{
	uint16_t allowed_max_rxd;
	uint16_t allowed_min_rxd;
	portid_t pid = 0;

	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
	if (rxd > allowed_max_rxd) {
		printf("Fail: input rxd (%u) can't be greater "
		       "than max_rxds (%u) of port %u\n",
		       rxd, allowed_max_rxd, pid);
		return -1;
	}

	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
	if (rxd < allowed_min_rxd) {
		printf("Fail: input rxd (%u) can't be less "
		       "than min_rxds (%u) of port %u\n",
		       rxd, allowed_min_rxd, pid);
		return -1;
	}

	return 0;
}
/*
 * Get the allowed maximum number of TXDs of every TX queue.
 * *pid returns the port id with the minimal value of
 * max_txd among all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_txd(portid_t *pid)
{
	uint16_t allowed_max_txd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_txd;
}
/*
 * Get the allowed minimal number of TXDs of every TX queue.
 * *pid returns the port id with the highest value of
 * min_txd among all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_txd(portid_t *pid)
{
	uint16_t allowed_min_txd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_txd;
}
/*
 * Check whether the input txd is valid: it must not be greater than any
 * TX queue's maximum number of TXDs, nor less than any TX queue's minimal
 * number of TXDs. Return 0 if valid, -1 otherwise.
 */
int
check_nb_txd(queueid_t txd)
{
	uint16_t allowed_max_txd;
	uint16_t allowed_min_txd;
	portid_t pid = 0;

	allowed_max_txd = get_allowed_max_nb_txd(&pid);
	if (txd > allowed_max_txd) {
		printf("Fail: input txd (%u) can't be greater "
		       "than max_txds (%u) of port %u\n",
		       txd, allowed_max_txd, pid);
		return -1;
	}

	allowed_min_txd = get_allowed_min_nb_txd(&pid);
	if (txd < allowed_min_txd) {
		printf("Fail: input txd (%u) can't be less "
		       "than min_txds (%u) of port %u\n",
		       txd, allowed_min_txd, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of hairpin queues.
 * *pid returns the port id with the minimal value of
 * max_hairpin_queues among all ports.
 */
queueid_t
get_allowed_max_nb_hairpinq(portid_t *pid)
{
	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
	portid_t pi;
	struct rte_eth_hairpin_cap cap;

	RTE_ETH_FOREACH_DEV(pi) {
		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
			*pid = pi;
			return 0;
		}
		if (cap.max_nb_queues < allowed_max_hairpinq) {
			allowed_max_hairpinq = cap.max_nb_queues;
			*pid = pi;
		}
	}
	return allowed_max_hairpinq;
}
/*
 * Check whether the input number of hairpin queues is valid: it must not be
 * greater than the maximum number of hairpin queues of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_hairpinq(queueid_t hairpinq)
{
	queueid_t allowed_max_hairpinq;
	portid_t pid = 0;

	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
	if (hairpinq > allowed_max_hairpinq) {
		printf("Fail: input hairpin (%u) can't be greater "
		       "than max_hairpin_queues (%u) of port %u\n",
		       hairpinq, allowed_max_hairpinq, pid);
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	uint16_t data_size;
	bool warning = 0;
	int k;
	int ret;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_info_get() failed\n");

		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;

		/* Check for maximum number of segments per MTU.
		 * Accordingly update the mbuf data size.
		 */
		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
		    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = rx_mode.max_rx_pkt_len /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;

			if ((data_size + RTE_PKTMBUF_HEADROOM) >
							mbuf_data_size[0]) {
				mbuf_data_size[0] = data_size +
						    RTE_PKTMBUF_HEADROOM;
				warning = 1;
			}
		}
	}

	if (warning)
		TESTPMD_LOG(WARNING,
			    "Configured mbuf size of the first segment %hu\n",
			    mbuf_data_size[0]);
	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
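	/*
	 * Illustrative arithmetic (assuming the usual testpmd constants:
	 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512, mb_mempool_cache = 250): with 4 lcores the
	 * per-pool count is (2048 + 1000 + 2048 + 512) * RTE_MAX_ETHPORTS.
	 */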
	if (numa_support) {
		uint8_t i, j;

		for (i = 0; i < num_sockets; i++)
			for (j = 0; j < mbuf_data_size_n; j++)
				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
					mbuf_pool_create(mbuf_data_size[j],
							 nb_mbuf_per_pool,
							 socket_ids[i], j);
	} else {
		uint8_t i;

		for (i = 0; i < mbuf_data_size_n; i++)
			mempools[i] = mbuf_pool_create
					(mbuf_data_size[i],
					 nb_mbuf_per_pool,
					 socket_num == UMA_NO_CONFIG ?
					 0 : socket_num, i);
	}
	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);

		if (mbp == NULL)
			mbp = mbuf_pool_find(0, 0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}
	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;
	int ret;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];

	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
	if (ret != 0)
		return;

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	uint64_t total_burst, sburst;
	uint64_t nb_burst;
	uint64_t burst_stats[4];
	uint16_t pktnb_stats[4];
	uint16_t nb_pkt;
	int burst_percent[4], sburstp;
	int i;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];
	pktnb_stats[0] = 0;

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];

		if (nb_burst == 0)
			continue;

		total_burst += nb_burst;

		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		if (i == 3) {
			printf("%d%% of other]\n", 100 - sburstp);
			return;
		}

		sburst += burst_stats[i];
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
				100 - sburstp, (int) pktnb_stats[i]);
			return;
		}

		burst_percent[i] =
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
			burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
	}
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
		"TX Port=%2d/Queue=%2d %s\n",
		fwd_top_stats_border, fs->rx_port, fs->rx_queue,
		fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
		" TX-dropped: %-14"PRIu64,
		fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
			"  Rx- bad L4 checksum: %-14"PRIu64
			"  Rx- bad outer L4 checksum: %-14"PRIu64"\n",
			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
			fs->rx_bad_outer_l4_csum);
	} else {
		printf("\n");
	}

	if (record_burst_stats) {
		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
	}
}
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
	uint64_t fwd_cycles = 0;
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;

		if (record_core_cycles)
			fwd_cycles += fs->core_cycles;
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		uint8_t j;

		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		if (!port->rx_queue_stats_mapping_enabled &&
		    !port->tx_queue_stats_mapping_enabled) {
			printf("  RX-packets: %-14"PRIu64
			       " RX-dropped: %-14"PRIu64
			       "RX-total: %-"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum: %-14"PRIu64
				       " Bad-l4csum: %-14"PRIu64
				       "Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if (stats.ierrors + stats.rx_nombuf > 0) {
				printf("  RX-error: %-"PRIu64"\n",
				       stats.ierrors);
				printf("  RX-nombufs: %-14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %-14"PRIu64
			       " TX-dropped: %-14"PRIu64
			       "TX-total: %-"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		} else {
			printf("  RX-packets: %14"PRIu64
			       "    RX-dropped:%14"PRIu64
			       "    RX-total:%14"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum:%14"PRIu64
				       "    Bad-l4csum:%14"PRIu64
				       "    Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if ((stats.ierrors + stats.rx_nombuf) > 0) {
				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
				printf("  RX-nombufs: %14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %14"PRIu64
			       "    TX-dropped:%14"PRIu64
			       "    TX-total:%14"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		}
		if (record_burst_stats) {
			if (ports_stats[pt_id].rx_stream)
				pkt_burst_stats_display("RX",
					&ports_stats[pt_id].rx_stream->rx_burst_stats);
			if (ports_stats[pt_id].tx_stream)
				pkt_burst_stats_display("TX",
					&ports_stats[pt_id].tx_stream->tx_burst_stats);
		}

		if (port->rx_queue_stats_mapping_enabled) {
			printf("\n");
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d RX-packets:%14"PRIu64
				       "     RX-errors:%14"PRIu64
				       "    RX-bytes:%14"PRIu64"\n",
				       j, stats.q_ipackets[j],
				       stats.q_errors[j], stats.q_ibytes[j]);
			}
			printf("\n");
		}
		if (port->tx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d TX-packets:%14"PRIu64
				       "    TX-bytes:%14"PRIu64"\n",
				       j, stats.q_opackets[j],
				       stats.q_obytes[j]);
			}
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	if (record_core_cycles) {
#define CYC_PER_MHZ 1E6
		if (total_recv > 0 || total_xmit > 0) {
			uint64_t total_pkts = 0;
			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
				total_pkts = total_xmit;
			else
				total_pkts = total_recv;

			printf("\n  CPU cycles/packet=%.2F (total cycles="
			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
			       " MHz Clock\n",
			       (double) fwd_cycles / total_pkts,
			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
		}
	}
}
void
fwd_stats_reset(void)
{
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		fs->rx_packets = 0;
		fs->tx_packets = 0;
		fs->fwd_dropped = 0;
		fs->rx_bad_ip_csum = 0;
		fs->rx_bad_l4_csum = 0;
		fs->rx_bad_outer_l4_csum = 0;

		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
		fs->core_cycles = 0;
	}
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer is used to exit
				 * this loop after a 1-second expiry.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIB_BITRATESTATS
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIB_BITRATESTATS
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	fwd_stats_reset();
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	port_fwd_end_t port_fwd_end;
	lcoreid_t lc_id;
	portid_t pt_id;
	int i;

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}

	fwd_stats_display();

	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down fail.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}
/* Configure the Rx and Tx hairpin queues for the selected port. */
static int
setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];
	uint16_t peer_rx_port = pi;
	uint16_t peer_tx_port = pi;
	uint32_t manual = 1;
	uint32_t tx_exp = hairpin_mode & 0x10;

	if (!(hairpin_mode & 0xf)) {
		peer_rx_port = pi;
		peer_tx_port = pi;
		manual = 0;
	} else if (hairpin_mode & 0x1) {
		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
						RTE_ETH_DEV_NO_OWNER);
		if (peer_tx_port >= RTE_MAX_ETHPORTS)
			peer_tx_port = rte_eth_find_next_owned_by(0,
						RTE_ETH_DEV_NO_OWNER);
		if (p_pi != RTE_MAX_ETHPORTS) {
			peer_rx_port = p_pi;
		} else {
			uint16_t next_pi;

			/* Last port will be the peer RX port of the first. */
			RTE_ETH_FOREACH_DEV(next_pi)
				peer_rx_port = next_pi;
		}
		manual = 1;
	} else if (hairpin_mode & 0x2) {
		if (cnt_pi & 0x1) {
			peer_rx_port = p_pi;
		} else {
			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
						RTE_ETH_DEV_NO_OWNER);
			if (peer_rx_port >= RTE_MAX_ETHPORTS)
				peer_rx_port = pi;
		}
		peer_tx_port = peer_rx_port;
		manual = 1;
	}
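	/*
	 * Illustrative mapping (assumed counts): with nb_rxq = nb_txq = 2 and
	 * nb_hairpinq = 2, the loops below bind local TX hairpin queues 2..3
	 * to the peer's RX hairpin queues 2..3, and vice versa.
	 */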
	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = peer_rx_port;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up the TX hairpin queue; restore the port. */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
2435 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2436 hairpin_conf.peers[0].port = peer_tx_port;
2437 hairpin_conf.peers[0].queue = i + nb_txq;
2438 hairpin_conf.manual_bind = !!manual;
2439 hairpin_conf.tx_explicit = !!tx_exp;
2440 diag = rte_eth_rx_hairpin_queue_setup
2441 (pi, qi, nb_rxd, &hairpin_conf);
2446 /* Failed to set up the Rx hairpin queue; return */
2447 if (rte_atomic16_cmpset(&(port->port_status),
2449 RTE_PORT_STOPPED) == 0)
2450 printf("Port %d cannot be set back "
2451 "to stopped\n", pi);
2452 printf("Failed to configure port %d hairpin "
2454 /* try to reconfigure queues next time */
2455 port->need_reconfig_queues = 1;
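/*
 * Illustrative sketch, not part of testpmd: one way to decode the
 * hairpin_mode bits tested at the top of setup_hairpin_queues() above.
 * The meanings mirror this file's branches only -- bits 0-3 select the
 * peering topology, bit 4 (0x10) requests explicit Tx flow mode. The
 * helper name is hypothetical.
 */
static inline void
hairpin_mode_describe(uint32_t mode)
{
	if (!(mode & 0xf))
		printf("self: each port hairpins Tx back to its own Rx\n");
	else if (mode & 0x1)
		printf("loop: Tx peers with the next port, the last wraps to the first\n");
	else if (mode & 0x2)
		printf("pair: adjacent ports are peered with each other\n");
	if (mode & 0x10)
		printf("explicit Tx flow rule mode\n");
}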
2461 /* Configure the Rx with optional split. */
2463 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2464 uint16_t nb_rx_desc, unsigned int socket_id,
2465 struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2467 union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2468 unsigned int i, mp_n;
2471 if (rx_pkt_nb_segs <= 1 ||
2472 (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2473 rx_conf->rx_seg = NULL;
2474 rx_conf->rx_nseg = 0;
2475 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2476 nb_rx_desc, socket_id,
2480 for (i = 0; i < rx_pkt_nb_segs; i++) {
2481 struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2482 struct rte_mempool *mpx;
2484 * Use the last valid pool for any segment whose index
2485 * exceeds the number of configured pools.
2487 mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2488 mpx = mbuf_pool_find(socket_id, mp_n);
2489 /* Handle zero as mbuf data buffer size. */
2490 rx_seg->length = rx_pkt_seg_lengths[i] ?
2491 rx_pkt_seg_lengths[i] :
2492 mbuf_data_size[mp_n];
2493 rx_seg->offset = i < rx_pkt_nb_offs ?
2494 rx_pkt_seg_offsets[i] : 0;
2495 rx_seg->mp = mpx ? mpx : mp;
2497 rx_conf->rx_nseg = rx_pkt_nb_segs;
2498 rx_conf->rx_seg = rx_useg;
2499 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2500 socket_id, rx_conf, NULL);
2501 rx_conf->rx_seg = NULL;
2502 rx_conf->rx_nseg = 0;
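/*
 * Illustrative sketch, not part of testpmd: a minimal standalone caller
 * using the same buffer-split mechanics as rx_queue_setup() above. The
 * first 128 bytes of each packet land in a small-buffer pool and the
 * remainder in a large-buffer pool. It assumes port/queue 0 are already
 * configured, the PMD advertises RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT, and
 * both mempools exist; the function name is hypothetical.
 */
static int
example_buffer_split_rxq(struct rte_mempool *hdr_mp, struct rte_mempool *pay_mp)
{
	union rte_eth_rxseg segs[2];
	struct rte_eth_rxconf rxconf;

	memset(segs, 0, sizeof(segs));
	memset(&rxconf, 0, sizeof(rxconf));
	segs[0].split.mp = hdr_mp;
	segs[0].split.length = 128;	/* first 128 bytes of every packet */
	segs[1].split.mp = pay_mp;
	segs[1].split.length = 0;	/* zero length: the rest of the packet */
	rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	rxconf.rx_seg = segs;
	rxconf.rx_nseg = 2;
	/* The final mempool argument is NULL: pools come from the segments. */
	return rte_eth_rx_queue_setup(0, 0, 512, rte_socket_id(), &rxconf, NULL);
}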
2507 start_port(portid_t pid)
2509 int diag, need_check_link_status = -1;
2511 portid_t p_pi = RTE_MAX_ETHPORTS;
2512 portid_t pl[RTE_MAX_ETHPORTS];
2513 portid_t peer_pl[RTE_MAX_ETHPORTS];
2514 uint16_t cnt_pi = 0;
2515 uint16_t cfg_pi = 0;
2518 struct rte_port *port;
2519 struct rte_ether_addr mac_addr;
2520 struct rte_eth_hairpin_cap cap;
2522 if (port_id_is_invalid(pid, ENABLED_WARN))
2527 RTE_ETH_FOREACH_DEV(pi) {
2528 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2531 need_check_link_status = 0;
2533 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2534 RTE_PORT_HANDLING) == 0) {
2535 printf("Port %d is not stopped, skipping it\n", pi);
2539 if (port->need_reconfig > 0) {
2540 uint16_t mtu = RTE_ETHER_MTU;
2542 port->need_reconfig = 0;
2544 if (flow_isolate_all) {
2545 int ret = port_flow_isolate(pi, 1);
2547 printf("Failed to apply isolated"
2548 " mode on port %d\n", pi);
2552 configure_rxtx_dump_callbacks(0);
2553 printf("Configuring Port %d (socket %u)\n", pi,
2555 if (nb_hairpinq > 0 &&
2556 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2557 printf("Port %d doesn't support hairpin "
2561 /* configure port */
2562 diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
2563 nb_txq + nb_hairpinq,
2566 if (rte_atomic16_cmpset(&(port->port_status),
2567 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2568 printf("Port %d cannot be set back "
2569 "to stopped\n", pi);
2570 printf("Failed to configure port %d\n", pi);
2571 /* try to reconfigure port next time */
2572 port->need_reconfig = 1;
2577 * Workaround for rte_eth_dev_configure(): deriving the MTU from
2578 * max_rx_pkt_len sets it wrong for PMDs whose frame overhead is
2579 * bigger than RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN.
2580 * For a PMD with 26 bytes of overhead, rte_eth_dev_configure()
2581 * can set the MTU to at most 1492 instead of the expected 1500.
2582 * Use rte_eth_dev_set_mtu() to set the MTU correctly; the default
2583 * MTU value is 1500.
2585 diag = rte_eth_dev_get_mtu(pi, &mtu);
2587 printf("Failed to get MTU for port %d\n", pi);
2588 diag = rte_eth_dev_set_mtu(pi, mtu);
2589 if (diag != 0 && diag != -ENOTSUP)
2590 printf("Failed to set MTU to %u for port %d\n",
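/*
 * Worked example of the workaround above, illustrative only: with
 * max_rx_pkt_len = 1518 (RTE_ETHER_MTU 1500 + RTE_ETHER_HDR_LEN 14 +
 * RTE_ETHER_CRC_LEN 4), a PMD whose real frame overhead is 26 bytes can
 * only reach an MTU of 1518 - 26 = 1492 via rte_eth_dev_configure().
 * Re-applying the reported MTU through rte_eth_dev_set_mtu() lets the
 * PMD add its own overhead on top, recovering the expected 1500.
 */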
2593 if (port->need_reconfig_queues > 0) {
2594 port->need_reconfig_queues = 0;
2595 /* setup tx queues */
2596 for (qi = 0; qi < nb_txq; qi++) {
2597 if ((numa_support) &&
2598 (txring_numa[pi] != NUMA_NO_CONFIG))
2599 diag = rte_eth_tx_queue_setup(pi, qi,
2600 port->nb_tx_desc[qi],
2602 &(port->tx_conf[qi]));
2604 diag = rte_eth_tx_queue_setup(pi, qi,
2605 port->nb_tx_desc[qi],
2607 &(port->tx_conf[qi]));
2612 /* Failed to set up the Tx queue; return */
2613 if (rte_atomic16_cmpset(&(port->port_status),
2615 RTE_PORT_STOPPED) == 0)
2616 printf("Port %d cannot be set back "
2617 "to stopped\n", pi);
2618 printf("Failed to configure port %d Tx queues\n",
2620 /* try to reconfigure queues next time */
2621 port->need_reconfig_queues = 1;
2624 for (qi = 0; qi < nb_rxq; qi++) {
2625 /* setup rx queues */
2626 if ((numa_support) &&
2627 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2628 struct rte_mempool *mp =
2630 (rxring_numa[pi], 0);
2632 printf("Failed to set up Rx queue: "
2633 "no mempool allocated"
2634 " on socket %d\n",
2639 diag = rx_queue_setup(pi, qi,
2640 port->nb_rx_desc[qi],
2642 &(port->rx_conf[qi]),
2645 struct rte_mempool *mp =
2647 (port->socket_id, 0);
2649 printf("Failed to set up Rx queue: "
2650 "no mempool allocated"
2651 " on socket %d\n",
2655 diag = rx_queue_setup(pi, qi,
2656 port->nb_rx_desc[qi],
2658 &(port->rx_conf[qi]),
2664 /* Failed to set up the Rx queue; return */
2665 if (rte_atomic16_cmpset(&(port->port_status),
2667 RTE_PORT_STOPPED) == 0)
2668 printf("Port %d cannot be set back "
2669 "to stopped\n", pi);
2670 printf("Failed to configure port %d Rx queues\n",
2672 /* try to reconfigure queues next time */
2673 port->need_reconfig_queues = 1;
2676 /* setup hairpin queues */
2677 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
2680 configure_rxtx_dump_callbacks(verbose_level);
2682 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2686 "Port %d: Failed to disable Ptype parsing\n",
2694 if (rte_eth_dev_start(pi) < 0) {
2695 printf("Failed to start port %d\n", pi);
2697 /* Failed to start the port; return */
2698 if (rte_atomic16_cmpset(&(port->port_status),
2699 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2700 printf("Port %d cannot be set back to "
2705 if (rte_atomic16_cmpset(&(port->port_status),
2706 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2707 printf("Port %d cannot be set to started\n", pi);
2709 if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2710 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2711 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2712 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2713 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2715 /* At least one port started; need to check link status */
2716 need_check_link_status = 1;
2721 if (need_check_link_status == 1 && !no_link_check)
2722 check_all_ports_link_status(RTE_PORT_ALL);
2723 else if (need_check_link_status == 0)
2724 printf("Please stop the ports first\n");
2726 if (hairpin_mode & 0xf) {
2730 /* bind all started hairpin ports */
2731 for (i = 0; i < cfg_pi; i++) {
2733 /* bind current Tx to all peer Rx */
2734 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2735 RTE_MAX_ETHPORTS, 1);
2738 for (j = 0; j < peer_pi; j++) {
2739 if (!port_is_started(peer_pl[j]))
2741 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2743 printf("Error during binding hairpin"
2744 " Tx port %u to %u: %s\n",
2746 rte_strerror(-diag));
2750 /* bind all peer Tx to current Rx */
2751 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2752 RTE_MAX_ETHPORTS, 0);
2755 for (j = 0; j < peer_pi; j++) {
2756 if (!port_is_started(peer_pl[j]))
2758 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
2760 printf("Error during binding hairpin"
2761 " Tx port %u to %u: %s\n",
2763 rte_strerror(-diag));
2775 stop_port(portid_t pid)
2778 struct rte_port *port;
2779 int need_check_link_status = 0;
2780 portid_t peer_pl[RTE_MAX_ETHPORTS];
2788 if (port_id_is_invalid(pid, ENABLED_WARN))
2791 printf("Stopping ports...\n");
2793 RTE_ETH_FOREACH_DEV(pi) {
2794 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2797 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2798 printf("Please remove port %d from forwarding configuration.\n", pi);
2802 if (port_is_bonding_slave(pi)) {
2803 printf("Please remove port %d from bonded device.\n", pi);
2808 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2809 RTE_PORT_HANDLING) == 0)
2812 if (hairpin_mode & 0xf) {
2815 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
2816 /* unbind all peer Tx from current Rx */
2817 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2818 RTE_MAX_ETHPORTS, 0);
2821 for (j = 0; j < peer_pi; j++) {
2822 if (!port_is_started(peer_pl[j]))
2824 rte_eth_hairpin_unbind(peer_pl[j], pi);
2828 if (rte_eth_dev_stop(pi) != 0)
2829 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
2832 if (rte_atomic16_cmpset(&(port->port_status),
2833 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2834 printf("Port %d cannot be set to stopped\n", pi);
2835 need_check_link_status = 1;
2837 if (need_check_link_status && !no_link_check)
2838 check_all_ports_link_status(RTE_PORT_ALL);
2844 remove_invalid_ports_in(portid_t *array, portid_t *total)
2847 portid_t new_total = 0;
2849 for (i = 0; i < *total; i++)
2850 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2851 array[new_total] = array[i];
2858 remove_invalid_ports(void)
2860 remove_invalid_ports_in(ports_ids, &nb_ports);
2861 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2862 nb_cfg_ports = nb_fwd_ports;
2866 close_port(portid_t pid)
2869 struct rte_port *port;
2871 if (port_id_is_invalid(pid, ENABLED_WARN))
2874 printf("Closing ports...\n");
2876 RTE_ETH_FOREACH_DEV(pi) {
2877 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2880 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2881 printf("Please remove port %d from forwarding configuration.\n", pi);
2885 if (port_is_bonding_slave(pi)) {
2886 printf("Please remove port %d from bonded device.\n", pi);
2891 if (rte_atomic16_cmpset(&(port->port_status),
2892 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2893 printf("Port %d is already closed\n", pi);
2897 port_flow_flush(pi);
2898 rte_eth_dev_close(pi);
2901 remove_invalid_ports();
2906 reset_port(portid_t pid)
2910 struct rte_port *port;
2912 if (port_id_is_invalid(pid, ENABLED_WARN))
2915 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2916 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2917 printf("Cannot reset port(s); please stop the port(s) first.\n");
2921 printf("Resetting ports...\n");
2923 RTE_ETH_FOREACH_DEV(pi) {
2924 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2927 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2928 printf("Please remove port %d from forwarding "
2929 "configuration.\n", pi);
2933 if (port_is_bonding_slave(pi)) {
2934 printf("Please remove port %d from bonded device.\n",
2939 diag = rte_eth_dev_reset(pi);
2942 port->need_reconfig = 1;
2943 port->need_reconfig_queues = 1;
2945 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2953 attach_port(char *identifier)
2956 struct rte_dev_iterator iterator;
2958 printf("Attaching a new port...\n");
2960 if (identifier == NULL) {
2961 printf("Invalid parameter specified\n");
2965 if (rte_dev_probe(identifier) < 0) {
2966 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2970 /* first attach mode: event */
2971 if (setup_on_probe_event) {
2972 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2973 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2974 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2975 ports[pi].need_setup != 0)
2976 setup_attached_port(pi);
2980 /* second attach mode: iterator */
2981 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2982 /* setup ports matching the devargs used for probing */
2983 if (port_is_forwarding(pi))
2984 continue; /* port was already attached before */
2985 setup_attached_port(pi);
2990 setup_attached_port(portid_t pi)
2992 unsigned int socket_id;
2995 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2996 /* if socket_id is invalid, set to the first available socket. */
2997 if (check_socket_id(socket_id) < 0)
2998 socket_id = socket_ids[0];
2999 reconfig(pi, socket_id);
3000 ret = rte_eth_promiscuous_enable(pi);
3002 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3003 pi, rte_strerror(-ret));
3005 ports_ids[nb_ports++] = pi;
3006 fwd_ports_ids[nb_fwd_ports++] = pi;
3007 nb_cfg_ports = nb_fwd_ports;
3008 ports[pi].need_setup = 0;
3009 ports[pi].port_status = RTE_PORT_STOPPED;
3011 printf("Port %d is attached; total number of ports is now %d\n", pi, nb_ports);
3016 detach_device(struct rte_device *dev)
3021 printf("Device already removed\n");
3025 printf("Removing a device...\n");
3027 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3028 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3029 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3030 printf("Port %u not stopped\n", sibling);
3033 port_flow_flush(sibling);
3037 if (rte_dev_remove(dev) < 0) {
3038 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3041 remove_invalid_ports();
3043 printf("Device is detached\n");
3044 printf("Total number of ports is now %d\n", nb_ports);
3050 detach_port_device(portid_t port_id)
3052 if (port_id_is_invalid(port_id, ENABLED_WARN))
3055 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3056 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3057 printf("Port not stopped\n");
3060 printf("Port was not closed\n");
3063 detach_device(rte_eth_devices[port_id].device);
3067 detach_devargs(char *identifier)
3069 struct rte_dev_iterator iterator;
3070 struct rte_devargs da;
3073 printf("Removing a device...\n");
3075 memset(&da, 0, sizeof(da));
3076 if (rte_devargs_parsef(&da, "%s", identifier)) {
3077 printf("Cannot parse identifier\n");
3083 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
3084 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3085 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3086 printf("Port %u not stopped\n", port_id);
3087 rte_eth_iterator_cleanup(&iterator);
3090 port_flow_flush(port_id);
3094 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3095 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3096 da.name, da.bus->name);
3100 remove_invalid_ports();
3102 printf("Device %s is detached\n", identifier);
3103 printf("Total number of ports is now %d\n", nb_ports);
3115 stop_packet_forwarding();
3117 for (i = 0; i < RTE_DIM(mempools); i++) {
3119 if (mp_alloc_type == MP_ALLOC_ANON)
3120 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3124 if (ports != NULL) {
3126 RTE_ETH_FOREACH_DEV(pt_id) {
3127 printf("\nStopping port %d...\n", pt_id);
3131 RTE_ETH_FOREACH_DEV(pt_id) {
3132 printf("\nShutting down port %d...\n", pt_id);
3139 ret = rte_dev_event_monitor_stop();
3142 "fail to stop device event monitor.");
3146 ret = rte_dev_event_callback_unregister(NULL,
3147 dev_event_callback, NULL);
3150 "fail to unregister device event callback.\n");
3154 ret = rte_dev_hotplug_handle_disable();
3157 "fail to disable hotplug handling.\n");
3161 for (i = 0; i < RTE_DIM(mempools); i++) {
3163 rte_mempool_free(mempools[i]);
3166 printf("\nBye...\n");
3169 typedef void (*cmd_func_t)(void);
3170 struct pmd_test_command {
3171 const char *cmd_name;
3172 cmd_func_t cmd_func;
3175 /* Check the link status of all ports for up to 9 s, and finally print it */
3177 check_all_ports_link_status(uint32_t port_mask)
3179 #define CHECK_INTERVAL 100 /* 100ms */
3180 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3182 uint8_t count, all_ports_up, print_flag = 0;
3183 struct rte_eth_link link;
3185 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3187 printf("Checking link statuses...\n");
3189 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3191 RTE_ETH_FOREACH_DEV(portid) {
3192 if ((port_mask & (1 << portid)) == 0)
3194 memset(&link, 0, sizeof(link));
3195 ret = rte_eth_link_get_nowait(portid, &link);
3198 if (print_flag == 1)
3199 printf("Port %u link get failed: %s\n",
3200 portid, rte_strerror(-ret));
3203 /* print link status if flag set */
3204 if (print_flag == 1) {
3205 rte_eth_link_to_str(link_status,
3206 sizeof(link_status), &link);
3207 printf("Port %d %s\n", portid, link_status);
3210 /* clear all_ports_up flag if any link down */
3211 if (link.link_status == ETH_LINK_DOWN) {
3217 /* after the final printing of all link statuses, get out */
3217 if (print_flag == 1)
3220 if (all_ports_up == 0) {
3222 rte_delay_ms(CHECK_INTERVAL);
3225 /* set the print_flag if all ports up or timeout */
3226 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3236 rmv_port_callback(void *arg)
3238 int need_to_start = 0;
3239 int org_no_link_check = no_link_check;
3240 portid_t port_id = (intptr_t)arg;
3241 struct rte_device *dev;
3243 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3245 if (!test_done && port_is_forwarding(port_id)) {
3247 stop_packet_forwarding();
3251 no_link_check = org_no_link_check;
3253 /* Save rte_device pointer before closing ethdev port */
3254 dev = rte_eth_devices[port_id].device;
3255 close_port(port_id);
3256 detach_device(dev); /* might be already removed or have more ports */
3259 start_packet_forwarding(0);
3262 /* This function is used by the interrupt thread */
3264 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3267 RTE_SET_USED(param);
3268 RTE_SET_USED(ret_param);
3270 if (type >= RTE_ETH_EVENT_MAX) {
3271 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3272 port_id, __func__, type);
3274 } else if (event_print_mask & (UINT32_C(1) << type)) {
3275 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3276 eth_event_desc[type]);
3281 case RTE_ETH_EVENT_NEW:
3282 ports[port_id].need_setup = 1;
3283 ports[port_id].port_status = RTE_PORT_HANDLING;
3285 case RTE_ETH_EVENT_INTR_RMV:
3286 if (port_id_is_invalid(port_id, DISABLED_WARN))
3288 if (rte_eal_alarm_set(100000,
3289 rmv_port_callback, (void *)(intptr_t)port_id))
3290 fprintf(stderr, "Could not set up deferred device removal\n");
3292 case RTE_ETH_EVENT_DESTROY:
3293 ports[port_id].port_status = RTE_PORT_CLOSED;
3294 printf("Port %u is closed\n", port_id);
3303 register_eth_event_callback(void)
3306 enum rte_eth_event_type event;
3308 for (event = RTE_ETH_EVENT_UNKNOWN;
3309 event < RTE_ETH_EVENT_MAX; event++) {
3310 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3315 TESTPMD_LOG(ERR, "Failed to register callback for "
3316 "%s event\n", eth_event_desc[event]);
3324 /* This function is used by the interrupt thread */
3326 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3327 __rte_unused void *arg)
3332 if (type >= RTE_DEV_EVENT_MAX) {
3333 fprintf(stderr, "%s called upon invalid event %d\n",
3339 case RTE_DEV_EVENT_REMOVE:
3340 RTE_LOG(DEBUG, EAL, "The device %s has been removed!\n",
3342 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3344 RTE_LOG(ERR, EAL, "cannot get port for device %s\n",
3349 * Because the user's callback is invoked from the EAL interrupt
3350 * callback, the interrupt callback must finish before it can be
3351 * unregistered while the device is being detached. So return from
3352 * the callback quickly and detach the device via a deferred
3353 * removal instead. This is a workaround: once device detaching is
3354 * moved into the EAL in the future, the deferred removal could
3357 if (rte_eal_alarm_set(100000,
3358 rmv_port_callback, (void *)(intptr_t)port_id))
3360 "Could not set up deferred device removal\n");
3362 case RTE_DEV_EVENT_ADD:
3363 RTE_LOG(ERR, EAL, "The device %s has been added!\n",
3365 /* TODO: after kernel driver binding finishes,
3366 * begin to attach the port. */
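/*
 * Illustrative sketch, not part of testpmd: the deferred-work pattern
 * used by the RTE_DEV_EVENT_REMOVE case above. Work that must not run
 * inside the interrupt callback itself is scheduled roughly 100 ms later
 * through the EAL alarm API; both helper names are hypothetical.
 */
static void
deferred_work(void *arg)
{
	uint16_t port = (uint16_t)(uintptr_t)arg;

	printf("deferred handling of port %u\n", port);
}

static void
schedule_deferred_work(uint16_t port)
{
	/* 100000 us = 100 ms; the callback runs on the alarm thread */
	if (rte_eal_alarm_set(100000, deferred_work,
			(void *)(uintptr_t)port) != 0)
		printf("could not schedule deferred work for port %u\n", port);
}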
3375 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3379 uint8_t mapping_found = 0;
3381 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3382 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3383 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
3384 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
3385 tx_queue_stats_mappings[i].queue_id,
3386 tx_queue_stats_mappings[i].stats_counter_id);
3393 port->tx_queue_stats_mapping_enabled = 1;
3398 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3402 uint8_t mapping_found = 0;
3404 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3405 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3406 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
3407 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
3408 rx_queue_stats_mappings[i].queue_id,
3409 rx_queue_stats_mappings[i].stats_counter_id);
3416 port->rx_queue_stats_mapping_enabled = 1;
3421 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
3425 diag = set_tx_queue_stats_mapping_registers(pi, port);
3427 if (diag == -ENOTSUP) {
3428 port->tx_queue_stats_mapping_enabled = 0;
3429 printf("TX queue stats mapping not supported for port id=%d\n", pi);
3432 rte_exit(EXIT_FAILURE,
3433 "set_tx_queue_stats_mapping_registers "
3434 "failed for port id=%d diag=%d\n",
3438 diag = set_rx_queue_stats_mapping_registers(pi, port);
3440 if (diag == -ENOTSUP) {
3441 port->rx_queue_stats_mapping_enabled = 0;
3442 printf("RX queue stats mapping not supported for port id=%d\n", pi);
3445 rte_exit(EXIT_FAILURE,
3446 "set_rx_queue_stats_mapping_registers "
3447 "failed for port id=%d diag=%d\n",
3453 rxtx_port_config(struct rte_port *port)
3458 for (qid = 0; qid < nb_rxq; qid++) {
3459 offloads = port->rx_conf[qid].offloads;
3460 port->rx_conf[qid] = port->dev_info.default_rxconf;
3462 port->rx_conf[qid].offloads = offloads;
3464 /* Check if any Rx parameters have been passed */
3465 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3466 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3468 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3469 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3471 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3472 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3474 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3475 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3477 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3478 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3480 port->nb_rx_desc[qid] = nb_rxd;
3483 for (qid = 0; qid < nb_txq; qid++) {
3484 offloads = port->tx_conf[qid].offloads;
3485 port->tx_conf[qid] = port->dev_info.default_txconf;
3487 port->tx_conf[qid].offloads = offloads;
3489 /* Check if any Tx parameters have been passed */
3490 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3491 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3493 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3494 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3496 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3497 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3499 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3500 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3502 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3503 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3505 port->nb_tx_desc[qid] = nb_txd;
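/*
 * Note on the pattern above, illustrative: each queue first inherits the
 * PMD's default_rxconf/default_txconf from dev_info, and only the
 * threshold fields the user actually passed on the command line (e.g.
 * --rxpt, --rxfreet, --txrst) override those defaults;
 * RTE_PMD_PARAM_UNSET marks "not given".
 */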
3510 init_port_config(void)
3513 struct rte_port *port;
3516 RTE_ETH_FOREACH_DEV(pid) {
3518 port->dev_conf.fdir_conf = fdir_conf;
3520 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3525 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3526 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3527 rss_hf & port->dev_info.flow_type_rss_offloads;
3529 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3530 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3533 if (port->dcb_flag == 0) {
3534 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3535 port->dev_conf.rxmode.mq_mode =
3536 (enum rte_eth_rx_mq_mode)
3537 (rx_mq_mode & ETH_MQ_RX_RSS);
3539 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3542 rxtx_port_config(port);
3544 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3548 map_port_queue_stats_mapping_registers(pid, port);
3549 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3550 rte_pmd_ixgbe_bypass_init(pid);
3553 if (lsc_interrupt &&
3554 (rte_eth_devices[pid].data->dev_flags &
3555 RTE_ETH_DEV_INTR_LSC))
3556 port->dev_conf.intr_conf.lsc = 1;
3557 if (rmv_interrupt &&
3558 (rte_eth_devices[pid].data->dev_flags &
3559 RTE_ETH_DEV_INTR_RMV))
3560 port->dev_conf.intr_conf.rmv = 1;
3564 void set_port_slave_flag(portid_t slave_pid)
3566 struct rte_port *port;
3568 port = &ports[slave_pid];
3569 port->slave_flag = 1;
3572 void clear_port_slave_flag(portid_t slave_pid)
3574 struct rte_port *port;
3576 port = &ports[slave_pid];
3577 port->slave_flag = 0;
3580 uint8_t port_is_bonding_slave(portid_t slave_pid)
3582 struct rte_port *port;
3584 port = &ports[slave_pid];
3585 if ((rte_eth_devices[slave_pid].data->dev_flags &
3586 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3591 const uint16_t vlan_tags[] = {
3592 0, 1, 2, 3, 4, 5, 6, 7,
3593 8, 9, 10, 11, 12, 13, 14, 15,
3594 16, 17, 18, 19, 20, 21, 22, 23,
3595 24, 25, 26, 27, 28, 29, 30, 31
3599 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3600 enum dcb_mode_enable dcb_mode,
3601 enum rte_eth_nb_tcs num_tcs,
3606 struct rte_eth_rss_conf rss_conf;
3609 * Builds up the correct configuration for DCB+VT based on the vlan_tags
3610 * array given above and the number of traffic classes available for use.
3612 if (dcb_mode == DCB_VT_ENABLED) {
3613 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3614 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3615 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3616 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3618 /* VMDQ+DCB RX and TX configurations */
3619 vmdq_rx_conf->enable_default_pool = 0;
3620 vmdq_rx_conf->default_pool = 0;
3621 vmdq_rx_conf->nb_queue_pools =
3622 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3623 vmdq_tx_conf->nb_queue_pools =
3624 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3626 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3627 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3628 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3629 vmdq_rx_conf->pool_map[i].pools =
3630 1 << (i % vmdq_rx_conf->nb_queue_pools);
3632 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3633 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3634 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3637 /* Set the Rx and Tx multi-queue mode to DCB */
3638 eth_conf->rxmode.mq_mode =
3639 (enum rte_eth_rx_mq_mode)
3640 (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3641 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3643 struct rte_eth_dcb_rx_conf *rx_conf =
3644 &eth_conf->rx_adv_conf.dcb_rx_conf;
3645 struct rte_eth_dcb_tx_conf *tx_conf =
3646 &eth_conf->tx_adv_conf.dcb_tx_conf;
3648 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3650 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3654 rx_conf->nb_tcs = num_tcs;
3655 tx_conf->nb_tcs = num_tcs;
3657 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3658 rx_conf->dcb_tc[i] = i % num_tcs;
3659 tx_conf->dcb_tc[i] = i % num_tcs;
3662 eth_conf->rxmode.mq_mode =
3663 (enum rte_eth_rx_mq_mode)
3664 (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3665 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3666 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3670 eth_conf->dcb_capability_en =
3671 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3673 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
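/*
 * Worked example of the VMDQ+DCB mapping built above, illustrative only:
 * with num_tcs == ETH_4_TCS there are ETH_32_POOLS queue pools, so all 32
 * vlan_tags are mapped, each to its own pool (pool bitmask 1 << (i % 32)).
 * With ETH_8_TCS there are only 16 pools and just the first 16 vlan_tags
 * get a map entry. The 8 user priorities are spread round-robin over the
 * traffic classes: dcb_tc[i] = i % num_tcs.
 */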
3679 init_port_dcb_config(portid_t pid,
3680 enum dcb_mode_enable dcb_mode,
3681 enum rte_eth_nb_tcs num_tcs,
3684 struct rte_eth_conf port_conf;
3685 struct rte_port *rte_port;
3689 rte_port = &ports[pid];
3691 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3692 /* Enter DCB configuration status */
3695 port_conf.rxmode = rte_port->dev_conf.rxmode;
3696 port_conf.txmode = rte_port->dev_conf.txmode;
3698 /* Set the configuration of DCB in VT mode and DCB in non-VT mode */
3699 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3702 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3704 /* Re-configure the device. */
3705 retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
3709 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3713 /* If dev_info.vmdq_pool_base is greater than 0,
3714 * the queue IDs of the VMDQ pools start after the PF queues.
3716 if (dcb_mode == DCB_VT_ENABLED &&
3717 rte_port->dev_info.vmdq_pool_base > 0) {
3718 printf("VMDQ_DCB multi-queue mode is nonsensical"
3719 " for port %d.\n", pid);
3723 /* Assume the ports in testpmd have the same DCB capability
3724 * and the same number of Rx and Tx queues in DCB mode
3726 if (dcb_mode == DCB_VT_ENABLED) {
3727 if (rte_port->dev_info.max_vfs > 0) {
3728 nb_rxq = rte_port->dev_info.nb_rx_queues;
3729 nb_txq = rte_port->dev_info.nb_tx_queues;
3731 nb_rxq = rte_port->dev_info.max_rx_queues;
3732 nb_txq = rte_port->dev_info.max_tx_queues;
3735 /* If VT is disabled, use all PF queues */
3736 if (rte_port->dev_info.vmdq_pool_base == 0) {
3737 nb_rxq = rte_port->dev_info.max_rx_queues;
3738 nb_txq = rte_port->dev_info.max_tx_queues;
3740 nb_rxq = (queueid_t)num_tcs;
3741 nb_txq = (queueid_t)num_tcs;
3745 rx_free_thresh = 64;
3747 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3749 rxtx_port_config(rte_port);
3751 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3752 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3753 rx_vft_set(pid, vlan_tags[i], 1);
3755 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3759 map_port_queue_stats_mapping_registers(pid, rte_port);
3761 rte_port->dcb_flag = 1;
3771 /* Configuration of Ethernet ports. */
3772 ports = rte_zmalloc("testpmd: ports",
3773 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3774 RTE_CACHE_LINE_SIZE);
3775 if (ports == NULL) {
3776 rte_exit(EXIT_FAILURE,
3777 "rte_zmalloc(%d struct rte_port) failed\n",
3780 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3781 LIST_INIT(&ports[i].flow_tunnel_list);
3782 /* Initialize ports NUMA structures */
3783 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3784 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3785 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3799 const char clr[] = { 27, '[', '2', 'J', '\0' };
3800 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3802 /* Clear screen and move to top left */
3803 printf("%s%s", clr, top_left);
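/*
 * Note, illustrative: clr and top_left are the ANSI escape sequences
 * ESC [ 2 J (erase the display) and ESC [ 1 ; 1 H (move the cursor to
 * row 1, column 1); 27 is ESC. The pair is equivalent to
 * printf("\033[2J\033[1;1H").
 */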
3805 printf("\nPort statistics ====================================");
3806 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3807 nic_stats_display(fwd_ports_ids[i]);
3813 signal_handler(int signum)
3815 if (signum == SIGINT || signum == SIGTERM) {
3816 printf("\nSignal %d received, preparing to exit...\n",
3818 #ifdef RTE_LIB_PDUMP
3819 /* uninitialize packet capture framework */
3822 #ifdef RTE_LIB_LATENCYSTATS
3823 if (latencystats_enabled != 0)
3824 rte_latencystats_uninit();
3827 /* Set flag to indicate forced termination. */
3829 /* exit with the expected status */
3830 signal(signum, SIG_DFL);
3831 kill(getpid(), signum);
3836 main(int argc, char** argv)
3843 signal(SIGINT, signal_handler);
3844 signal(SIGTERM, signal_handler);
3846 testpmd_logtype = rte_log_register("testpmd");
3847 if (testpmd_logtype < 0)
3848 rte_exit(EXIT_FAILURE, "Cannot register log type");
3849 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3851 diag = rte_eal_init(argc, argv);
3853 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3854 rte_strerror(rte_errno));
3856 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3857 rte_exit(EXIT_FAILURE,
3858 "Secondary process type not supported.\n");
3860 ret = register_eth_event_callback();
3862 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3864 #ifdef RTE_LIB_PDUMP
3865 /* initialize packet capture framework */
3870 RTE_ETH_FOREACH_DEV(port_id) {
3871 ports_ids[count] = port_id;
3874 nb_ports = (portid_t) count;
3876 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3878 /* allocate port structures, and init them */
3881 set_def_fwd_config();
3883 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3884 "Check the core mask argument\n");
3886 /* Bitrate/latency stats disabled by default */
3887 #ifdef RTE_LIB_BITRATESTATS
3888 bitrate_enabled = 0;
3890 #ifdef RTE_LIB_LATENCYSTATS
3891 latencystats_enabled = 0;
3894 /* on FreeBSD, mlockall() is disabled by default */
3895 #ifdef RTE_EXEC_ENV_FREEBSD
3904 launch_args_parse(argc, argv);
3906 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3907 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3911 if (tx_first && interactive)
3912 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3913 "interactive mode.\n");
3915 if (tx_first && lsc_interrupt) {
3916 printf("Warning: lsc_interrupt needs to be off when "
3917 "using tx_first. Disabling.\n");
3921 if (!nb_rxq && !nb_txq)
3922 printf("Warning: Either rx or tx queues should be non-zero\n");
3924 if (nb_rxq > 1 && nb_rxq > nb_txq)
3925 printf("Warning: nb_rxq=%d enables RSS configuration, "
3926 "but nb_txq=%d will prevent to fully test it.\n",
3932 ret = rte_dev_hotplug_handle_enable();
3935 "fail to enable hotplug handling.");
3939 ret = rte_dev_event_monitor_start();
3942 "fail to start device event monitoring.");
3946 ret = rte_dev_event_callback_register(NULL,
3947 dev_event_callback, NULL);
3950 "fail to register device event callback\n");
3955 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3956 rte_exit(EXIT_FAILURE, "Failed to start ports\n");
3958 /* set all ports to promiscuous mode by default */
3959 RTE_ETH_FOREACH_DEV(port_id) {
3960 ret = rte_eth_promiscuous_enable(port_id);
3962 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3963 port_id, rte_strerror(-ret));
3966 /* Init metrics library */
3967 rte_metrics_init(rte_socket_id());
3969 #ifdef RTE_LIB_LATENCYSTATS
3970 if (latencystats_enabled != 0) {
3971 int ret = rte_latencystats_init(1, NULL);
3973 printf("Warning: latencystats init()"
3974 " returned error %d\n", ret);
3975 printf("Latencystats running on lcore %d\n",
3976 latencystats_lcore_id);
3980 /* Setup bitrate stats */
3981 #ifdef RTE_LIB_BITRATESTATS
3982 if (bitrate_enabled != 0) {
3983 bitrate_data = rte_stats_bitrate_create();
3984 if (bitrate_data == NULL)
3985 rte_exit(EXIT_FAILURE,
3986 "Could not allocate bitrate data.\n");
3987 rte_stats_bitrate_reg(bitrate_data);
3991 #ifdef RTE_LIB_CMDLINE
3992 if (strlen(cmdline_filename) != 0)
3993 cmdline_read_from_file(cmdline_filename);
3995 if (interactive == 1) {
3997 printf("Start automatic packet forwarding\n");
3998 start_packet_forwarding(0);
4010 printf("No command-line core given; starting packet forwarding\n");
4011 start_packet_forwarding(tx_first);
4012 if (stats_period != 0) {
4013 uint64_t prev_time = 0, cur_time, diff_time = 0;
4014 uint64_t timer_period;
4016 /* Convert to number of cycles */
4017 timer_period = stats_period * rte_get_timer_hz();
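/*
 * Worked example, illustrative only: with --stats-period 5 and a 2.5 GHz
 * timer, rte_get_timer_hz() returns 2500000000, so timer_period =
 * 5 * 2500000000 = 12500000000 cycles, i.e. one statistics refresh every
 * 5 seconds of wall-clock time.
 */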
4019 while (f_quit == 0) {
4020 cur_time = rte_get_timer_cycles();
4021 diff_time += cur_time - prev_time;
4023 if (diff_time >= timer_period) {
4025 /* Reset the timer */
4028 /* Sleep to avoid unnecessary checks */
4029 prev_time = cur_time;
4034 printf("Press enter to exit\n");
4035 rc = read(0, &c, 1);
4041 ret = rte_eal_cleanup();
4043 rte_exit(EXIT_FAILURE,
4044 "EAL cleanup failed: %s\n", strerror(-ret));
4046 return EXIT_SUCCESS;