1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
48 #include <rte_string_fns.h>
50 #include <rte_pmd_ixgbe.h>
53 #include <rte_pdump.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIB_BITRATESTATS
58 #include <rte_bitrate.h>
60 #ifdef RTE_LIB_LATENCYSTATS
61 #include <rte_latencystats.h>
67 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
68 #define HUGE_FLAG (0x40000)
70 #define HUGE_FLAG MAP_HUGETLB
73 #ifndef MAP_HUGE_SHIFT
74 /* older kernels (or FreeBSD) will not have this define */
75 #define HUGE_SHIFT (26)
77 #define HUGE_SHIFT MAP_HUGE_SHIFT
80 #define EXTMEM_HEAP_NAME "extmem"
81 #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* Use the main core for the command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, the NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports among the CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
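/* NUMA dispatching can be turned off at startup; testpmd's --no-numa
 * command-line option clears this flag so all memory falls back to
 * socket 0 (option parsing itself lives in parameters.c).
 */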
102 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
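/* A sketch of how the modes above are typically selected (option names
 * as documented for testpmd; the parsing is elsewhere, in parameters.c):
 *   --mp-alloc=native   -> MP_ALLOC_NATIVE
 *   --mp-alloc=anon     -> MP_ALLOC_ANON
 *   --mp-alloc=xmem     -> MP_ALLOC_XMEM
 *   --mp-alloc=xmemhuge -> MP_ALLOC_XMEM_HUGE
 */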
117 * Store the sockets specified for the memory pools used by the ports.
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123 * Store the sockets specified for the RX rings used by the ports.
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129 * Store the sockets specified for the TX rings used by the ports.
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet address of peer target ports to which packets are forwarded.
137 * Must be instantiated with the Ethernet addresses of peer traffic generator ports.
140 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
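/* Illustrative example of the invariants above: reducing the number of
 * forwarding cores at runtime (e.g. "set nb-cores 2" at the testpmd
 * prompt) lowers nb_fwd_lcores while nb_cfg_lcores and nb_lcores keep
 * their configured/probed values, preserving the ordering above.
 */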
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
182 &five_tuple_swap_fwd_engine,
183 #ifdef RTE_LIBRTE_IEEE1588
184 &ieee1588_fwd_engine,
189 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
190 uint16_t mempool_flags;
192 struct fwd_config cur_fwd_config;
193 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
194 uint32_t retry_enabled;
195 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
196 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
198 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
199 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
200 DEFAULT_MBUF_DATA_SIZE
201 }; /**< Mbuf data space size. */
202 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
203 * specified on command-line. */
204 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
207 * In a container, the process running with the 'stats-period' option cannot
208 * be terminated. Set a flag to exit the stats period loop after SIGINT/SIGTERM.
213 * Configuration of packet segments used to scatter received packets
214 * if any of the split features is configured.
216 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
217 uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
218 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
219 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
222 * Configuration of packet segments used by the "txonly" processing engine.
224 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
225 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
226 TXONLY_DEF_PACKET_LEN,
228 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
230 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
231 /**< Split policy for packets to TX. */
233 uint8_t txonly_multi_flow;
234 /**< Whether multiple flows are generated in TXONLY mode. */
236 uint32_t tx_pkt_times_inter;
237 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
239 uint32_t tx_pkt_times_intra;
240 /**< Timings for send scheduling in TXONLY mode, time between packets. */
242 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
243 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
245 /* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
246 uint8_t dcb_config = 0;
248 /* Whether DCB is in testing status */
249 uint8_t dcb_test = 0;
252 * Configurable number of RX/TX queues.
254 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
255 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
256 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
259 * Configurable number of RX/TX ring descriptors.
260 * Defaults are supplied by drivers via ethdev.
262 #define RTE_TEST_RX_DESC_DEFAULT 0
263 #define RTE_TEST_TX_DESC_DEFAULT 0
264 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
265 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
267 #define RTE_PMD_PARAM_UNSET -1
269 * Configurable values of RX and TX ring threshold registers.
272 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
273 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
274 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
276 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
277 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
278 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
281 * Configurable value of RX free threshold.
283 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
286 * Configurable value of RX drop enable.
288 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
291 * Configurable value of TX free threshold.
293 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
296 * Configurable value of TX RS bit threshold.
298 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
301 * Configurable value of buffered packets before sending.
303 uint16_t noisy_tx_sw_bufsz;
306 * Configurable value of packet buffer timeout.
308 uint16_t noisy_tx_sw_buf_flush_time;
311 * Configurable value for size of VNF internal memory area
312 * used for simulating noisy neighbour behaviour
314 uint64_t noisy_lkup_mem_sz;
317 * Configurable value of number of random writes done in
318 * VNF simulation memory area.
320 uint64_t noisy_lkup_num_writes;
323 * Configurable value of number of random reads done in
324 * VNF simulation memory area.
326 uint64_t noisy_lkup_num_reads;
329 * Configurable value of number of random reads/writes done in
330 * VNF simulation memory area.
332 uint64_t noisy_lkup_num_reads_writes;
335 * Receive Side Scaling (RSS) configuration.
337 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
340 * Port topology configuration
342 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
345 * Avoid flushing all the RX streams before starting forwarding.
347 uint8_t no_flush_rx = 0; /* flush by default */
350 * Flow API isolated mode.
352 uint8_t flow_isolate_all;
355 * Avoid checking the link status when starting/stopping a port.
357 uint8_t no_link_check = 0; /* check by default */
360 * Don't automatically start all ports in interactive mode.
362 uint8_t no_device_start = 0;
365 * Enable link status change notification
367 uint8_t lsc_interrupt = 1; /* enabled by default */
370 * Enable device removal notification.
372 uint8_t rmv_interrupt = 1; /* enabled by default */
374 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
376 /* After attach, port setup is called on event or by iterator */
377 bool setup_on_probe_event = true;
379 /* Clear ptypes on port initialization. */
380 uint8_t clear_ptypes = true;
382 /* Hairpin ports configuration mode. */
383 uint16_t hairpin_mode;
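/* Bit layout as consumed by setup_hairpin_queues() below (derived from
 * that code, not from a formal spec): bits 0-3 select the port topology
 * (0x1 pairs Tx toward the next port, 0x2 pairs Rx toward the previous
 * one, 0 loops a port back to itself), and bit 4 (0x10) requests
 * explicit Tx flow mode.
 */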
385 /* Pretty printing of ethdev events */
386 static const char * const eth_event_desc[] = {
387 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
388 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
389 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
390 [RTE_ETH_EVENT_INTR_RESET] = "reset",
391 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
392 [RTE_ETH_EVENT_IPSEC] = "IPsec",
393 [RTE_ETH_EVENT_MACSEC] = "MACsec",
394 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
395 [RTE_ETH_EVENT_NEW] = "device probed",
396 [RTE_ETH_EVENT_DESTROY] = "device released",
397 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
398 [RTE_ETH_EVENT_MAX] = NULL,
402 * Display or mask ether events
403 * Default to all events except VF_MBOX, NEW and DESTROY
405 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
406 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
407 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
408 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
409 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
410 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
411 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
412 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
414 * Decide if all memory is locked for performance.
419 * NIC bypass mode configuration options.
422 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
423 /* The NIC bypass watchdog timeout. */
424 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
428 #ifdef RTE_LIB_LATENCYSTATS
431 * Set when latency stats are enabled on the command line.
433 uint8_t latencystats_enabled;
436 * Lcore ID to serve latency statistics.
438 lcoreid_t latencystats_lcore_id = -1;
443 * Ethernet device configuration.
445 struct rte_eth_rxmode rx_mode = {
446 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
447 /**< Default maximum frame length. */
450 struct rte_eth_txmode tx_mode = {
451 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
454 struct rte_fdir_conf fdir_conf = {
455 .mode = RTE_FDIR_MODE_NONE,
456 .pballoc = RTE_FDIR_PBALLOC_64K,
457 .status = RTE_FDIR_REPORT_STATUS,
459 .vlan_tci_mask = 0xFFEF,
461 .src_ip = 0xFFFFFFFF,
462 .dst_ip = 0xFFFFFFFF,
465 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
466 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
468 .src_port_mask = 0xFFFF,
469 .dst_port_mask = 0xFFFF,
470 .mac_addr_byte_mask = 0xFF,
471 .tunnel_type_mask = 1,
472 .tunnel_id_mask = 0xFFFFFFFF,
477 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
479 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
480 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
482 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
483 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
485 uint16_t nb_tx_queue_stats_mappings = 0;
486 uint16_t nb_rx_queue_stats_mappings = 0;
489 * Display zero values by default for xstats
491 uint8_t xstats_hide_zero;
494 * Measurement of CPU cycles disabled by default
496 uint8_t record_core_cycles;
499 * Display of RX and TX bursts disabled by default
501 uint8_t record_burst_stats;
503 unsigned int num_sockets = 0;
504 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
506 #ifdef RTE_LIB_BITRATESTATS
507 /* Bitrate statistics */
508 struct rte_stats_bitrates *bitrate_data;
509 lcoreid_t bitrate_lcore_id;
510 uint8_t bitrate_enabled;
513 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
514 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
517 * Hexadecimal bitmask of the RX multi-queue modes that can be enabled.
519 enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
521 /* Forward function declarations */
522 static void setup_attached_port(portid_t pi);
523 static void map_port_queue_stats_mapping_registers(portid_t pi,
524 struct rte_port *port);
525 static void check_all_ports_link_status(uint32_t port_mask);
526 static int eth_event_callback(portid_t port_id,
527 enum rte_eth_event_type type,
528 void *param, void *ret_param);
529 static void dev_event_callback(const char *device_name,
530 enum rte_dev_event_type type,
534 * Check if all the ports are started.
535 * If yes, return positive value. If not, return zero.
537 static int all_ports_started(void);
539 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
540 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
542 /* Holds the registered mbuf dynamic flags names. */
543 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
546 * Helper function to check if a socket is newly discovered.
547 * Return a positive value if the socket id is new, zero if already known.
550 new_socket_id(unsigned int socket_id)
554 for (i = 0; i < num_sockets; i++) {
555 if (socket_ids[i] == socket_id)
562 * Setup default configuration.
565 set_default_fwd_lcores_config(void)
569 unsigned int sock_num;
572 for (i = 0; i < RTE_MAX_LCORE; i++) {
573 if (!rte_lcore_is_enabled(i))
575 sock_num = rte_lcore_to_socket_id(i);
576 if (new_socket_id(sock_num)) {
577 if (num_sockets >= RTE_MAX_NUMA_NODES) {
578 rte_exit(EXIT_FAILURE,
579 "Total sockets greater than %u\n",
582 socket_ids[num_sockets++] = sock_num;
584 if (i == rte_get_main_lcore())
586 fwd_lcores_cpuids[nb_lc++] = i;
588 nb_lcores = (lcoreid_t) nb_lc;
589 nb_cfg_lcores = nb_lcores;
594 set_def_peer_eth_addrs(void)
598 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
599 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
600 peer_eth_addrs[i].addr_bytes[5] = i;
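/* e.g. port 3's default peer becomes the locally administered
 * address 02:00:00:00:00:03 (an illustrative value, not a
 * reserved one). */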
605 set_default_fwd_ports_config(void)
610 RTE_ETH_FOREACH_DEV(pt_id) {
611 fwd_ports_ids[i++] = pt_id;
613 /* Update sockets info according to the attached device */
614 int socket_id = rte_eth_dev_socket_id(pt_id);
615 if (socket_id >= 0 && new_socket_id(socket_id)) {
616 if (num_sockets >= RTE_MAX_NUMA_NODES) {
617 rte_exit(EXIT_FAILURE,
618 "Total sockets greater than %u\n",
621 socket_ids[num_sockets++] = socket_id;
625 nb_cfg_ports = nb_ports;
626 nb_fwd_ports = nb_ports;
630 set_def_fwd_config(void)
632 set_default_fwd_lcores_config();
633 set_def_peer_eth_addrs();
634 set_default_fwd_ports_config();
637 /* extremely pessimistic estimation of memory required to create a mempool */
639 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
641 unsigned int n_pages, mbuf_per_pg, leftover;
642 uint64_t total_mem, mbuf_mem, obj_sz;
644 /* there is no good way to predict how much space the mempool will
645 * occupy because it will allocate chunks on the fly, and some of those
646 * will come from default DPDK memory while some will come from our
647 * external memory, so just assume 128MB will be enough for everyone.
649 uint64_t hdr_mem = 128 << 20;
651 /* account for possible non-contiguousness */
652 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
654 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
658 mbuf_per_pg = pgsz / obj_sz;
659 leftover = (nb_mbufs % mbuf_per_pg) > 0;
660 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
662 mbuf_mem = n_pages * pgsz;
664 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
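/* Rough illustrative numbers (assumed, not measured): a 2048-byte
 * data-room mbuf has an object size somewhat above 2KB, so roughly
 * 900 objects fit in a 2MB page; 256K mbufs would then need about
 * 290 pages of mbuf_mem on top of the 128MB header estimate above. */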
666 if (total_mem > SIZE_MAX) {
667 TESTPMD_LOG(ERR, "Memory size too big\n");
670 *out = (size_t)total_mem;
676 pagesz_flags(uint64_t page_sz)
678 /* as per the mmap() manpage, the page-size flag is the log2 of the
679 * page size shifted left by MAP_HUGE_SHIFT
681 int log2 = rte_log2_u64(page_sz);
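/* Worked example: for 2MB pages log2 == 21, so we return 21 << 26,
 * which matches the kernel's MAP_HUGE_2MB definition. */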
683 return (log2 << HUGE_SHIFT);
687 alloc_mem(size_t memsz, size_t pgsz, bool huge)
692 /* allocate anonymous hugepages */
693 flags = MAP_ANONYMOUS | MAP_PRIVATE;
695 flags |= HUGE_FLAG | pagesz_flags(pgsz);
697 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
698 if (addr == MAP_FAILED)
704 struct extmem_param {
708 rte_iova_t *iova_table;
709 unsigned int iova_table_len;
713 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
716 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
717 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
718 unsigned int cur_page, n_pages, pgsz_idx;
719 size_t mem_sz, cur_pgsz;
720 rte_iova_t *iovas = NULL;
724 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
725 /* skip anything that is too big */
726 if (pgsizes[pgsz_idx] > SIZE_MAX)
729 cur_pgsz = pgsizes[pgsz_idx];
731 /* if we were told not to allocate hugepages, override */
733 cur_pgsz = sysconf(_SC_PAGESIZE);
735 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
737 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
741 /* allocate our memory */
742 addr = alloc_mem(mem_sz, cur_pgsz, huge);
744 /* if we couldn't allocate memory with a specified page size,
745 * that doesn't mean we can't do it with other page sizes, so try another one.
751 /* store IOVA addresses for every page in this memory area */
752 n_pages = mem_sz / cur_pgsz;
754 iovas = malloc(sizeof(*iovas) * n_pages);
757 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
760 /* lock memory if it's not huge pages */
764 /* populate IOVA addresses */
765 for (cur_page = 0; cur_page < n_pages; cur_page++) {
770 offset = cur_pgsz * cur_page;
771 cur = RTE_PTR_ADD(addr, offset);
773 /* touch the page before getting its IOVA */
774 *(volatile char *)cur = 0;
776 iova = rte_mem_virt2iova(cur);
778 iovas[cur_page] = iova;
783 /* if we couldn't allocate anything */
789 param->pgsz = cur_pgsz;
790 param->iova_table = iovas;
791 param->iova_table_len = n_pages;
798 munmap(addr, mem_sz);
804 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
806 struct extmem_param param;
809 memset(&param, 0, sizeof(param));
811 /* check if our heap exists */
812 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
814 /* create our heap */
815 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
817 TESTPMD_LOG(ERR, "Cannot create heap\n");
822 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
824 TESTPMD_LOG(ERR, "Cannot create memory area\n");
828 /* we now have a valid memory area, so add it to heap */
829 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
830 param.addr, param.len, param.iova_table,
831 param.iova_table_len, param.pgsz);
833 /* when using VFIO, memory is automatically mapped for DMA by EAL */
835 /* not needed any more */
836 free(param.iova_table);
839 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
840 munmap(param.addr, param.len);
846 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
852 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
853 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
858 RTE_ETH_FOREACH_DEV(pid) {
859 struct rte_eth_dev *dev =
860 &rte_eth_devices[pid];
862 ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
866 "unable to DMA unmap addr 0x%p "
868 memhdr->addr, dev->data->name);
871 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
874 "unable to un-register addr 0x%p\n", memhdr->addr);
879 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
880 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
883 size_t page_size = sysconf(_SC_PAGESIZE);
886 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
890 "unable to register addr 0x%p\n", memhdr->addr);
893 RTE_ETH_FOREACH_DEV(pid) {
894 struct rte_eth_dev *dev =
895 &rte_eth_devices[pid];
897 ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
901 "unable to DMA map addr 0x%p "
903 memhdr->addr, dev->data->name);
909 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
910 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
912 struct rte_pktmbuf_extmem *xmem;
913 unsigned int ext_num, zone_num, elt_num;
916 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
917 elt_num = EXTBUF_ZONE_SIZE / elt_size;
918 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
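/* Example (illustrative): with mbuf_sz == 2048 the cache-line-aligned
 * elt_size stays 2048, a 2MB zone holds 1024 elements, and zone_num
 * is the ceiling of nb_mbufs / 1024. */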
920 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
922 TESTPMD_LOG(ERR, "Cannot allocate memory for "
923 "external buffer descriptors\n");
927 for (ext_num = 0; ext_num < zone_num; ext_num++) {
928 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
929 const struct rte_memzone *mz;
930 char mz_name[RTE_MEMZONE_NAMESIZE];
933 ret = snprintf(mz_name, sizeof(mz_name),
934 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
935 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
936 errno = ENAMETOOLONG;
940 mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
942 RTE_MEMZONE_IOVA_CONTIG |
944 RTE_MEMZONE_SIZE_HINT_ONLY,
948 * The caller exits on external buffer creation
949 * error, so there is no need to free memzones.
955 xseg->buf_ptr = mz->addr;
956 xseg->buf_iova = mz->iova;
957 xseg->buf_len = EXTBUF_ZONE_SIZE;
958 xseg->elt_size = elt_size;
960 if (ext_num == 0 && xmem != NULL) {
969 * Configuration initialisation done once at init time.
971 static struct rte_mempool *
972 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
973 unsigned int socket_id, uint16_t size_idx)
975 char pool_name[RTE_MEMPOOL_NAMESIZE];
976 struct rte_mempool *rte_mp = NULL;
979 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
980 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
983 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
984 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
986 switch (mp_alloc_type) {
987 case MP_ALLOC_NATIVE:
989 /* wrapper to rte_mempool_create() */
990 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
991 rte_mbuf_best_mempool_ops());
992 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
993 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
998 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
999 mb_size, (unsigned int) mb_mempool_cache,
1000 sizeof(struct rte_pktmbuf_pool_private),
1001 socket_id, mempool_flags);
1005 if (rte_mempool_populate_anon(rte_mp) == 0) {
1006 rte_mempool_free(rte_mp);
1010 rte_pktmbuf_pool_init(rte_mp, NULL);
1011 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1012 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1016 case MP_ALLOC_XMEM_HUGE:
1019 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1021 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1022 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1025 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1026 if (heap_socket < 0)
1027 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1029 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1030 rte_mbuf_best_mempool_ops());
1031 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1032 mb_mempool_cache, 0, mbuf_seg_size,
1038 struct rte_pktmbuf_extmem *ext_mem;
1039 unsigned int ext_num;
1041 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1042 socket_id, pool_name, &ext_mem);
1044 rte_exit(EXIT_FAILURE,
1045 "Can't create pinned data buffers\n");
1047 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1048 rte_mbuf_best_mempool_ops());
1049 rte_mp = rte_pktmbuf_pool_create_extbuf
1050 (pool_name, nb_mbuf, mb_mempool_cache,
1051 0, mbuf_seg_size, socket_id,
1058 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1063 if (rte_mp == NULL) {
1064 rte_exit(EXIT_FAILURE,
1065 "Creation of mbuf pool for socket %u failed: %s\n",
1066 socket_id, rte_strerror(rte_errno));
1067 } else if (verbose_level > 0) {
1068 rte_mempool_dump(stdout, rte_mp);
1074 * Check whether the given socket id is valid in NUMA mode;
1075 * if valid, return 0, else return -1.
1078 check_socket_id(const unsigned int socket_id)
1080 static int warning_once = 0;
1082 if (new_socket_id(socket_id)) {
1083 if (!warning_once && numa_support)
1084 printf("Warning: NUMA should be configured manually by"
1085 " using --port-numa-config and"
1086 " --ring-numa-config parameters along with"
1095 * Get the allowed maximum number of RX queues.
1096 * *pid returns the port id which has the minimal value of
1097 * max_rx_queues among all ports.
1100 get_allowed_max_nb_rxq(portid_t *pid)
1102 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1103 bool max_rxq_valid = false;
1105 struct rte_eth_dev_info dev_info;
1107 RTE_ETH_FOREACH_DEV(pi) {
1108 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1111 max_rxq_valid = true;
1112 if (dev_info.max_rx_queues < allowed_max_rxq) {
1113 allowed_max_rxq = dev_info.max_rx_queues;
1117 return max_rxq_valid ? allowed_max_rxq : 0;
1121 * Check whether the input rxq is valid.
1122 * If the input rxq is not greater than the maximum number
1123 * of RX queues of any port, it is valid.
1124 * If valid, return 0, else return -1.
1127 check_nb_rxq(queueid_t rxq)
1129 queueid_t allowed_max_rxq;
1132 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1133 if (rxq > allowed_max_rxq) {
1134 printf("Fail: input rxq (%u) can't be greater "
1135 "than max_rx_queues (%u) of port %u\n",
1145 * Get the allowed maximum number of TX queues.
1146 * *pid returns the port id which has the minimal value of
1147 * max_tx_queues among all ports.
1150 get_allowed_max_nb_txq(portid_t *pid)
1152 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1153 bool max_txq_valid = false;
1155 struct rte_eth_dev_info dev_info;
1157 RTE_ETH_FOREACH_DEV(pi) {
1158 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1161 max_txq_valid = true;
1162 if (dev_info.max_tx_queues < allowed_max_txq) {
1163 allowed_max_txq = dev_info.max_tx_queues;
1167 return max_txq_valid ? allowed_max_txq : 0;
1171 * Check whether the input txq is valid.
1172 * If the input txq is not greater than the maximum number
1173 * of TX queues of any port, it is valid.
1174 * If valid, return 0, else return -1.
1177 check_nb_txq(queueid_t txq)
1179 queueid_t allowed_max_txq;
1182 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1183 if (txq > allowed_max_txq) {
1184 printf("Fail: input txq (%u) can't be greater "
1185 "than max_tx_queues (%u) of port %u\n",
1195 * Get the allowed maximum number of RXDs of every rx queue.
1196 * *pid returns the port id which has the minimal value of
1197 * max_rxd among all queues of all ports.
1200 get_allowed_max_nb_rxd(portid_t *pid)
1202 uint16_t allowed_max_rxd = UINT16_MAX;
1204 struct rte_eth_dev_info dev_info;
1206 RTE_ETH_FOREACH_DEV(pi) {
1207 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1210 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1211 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1215 return allowed_max_rxd;
1219 * Get the allowed minimal number of RXDs of every rx queue.
1220 * *pid returns the port id which has the maximal value of
1221 * min_rxd among all queues of all ports.
1224 get_allowed_min_nb_rxd(portid_t *pid)
1226 uint16_t allowed_min_rxd = 0;
1228 struct rte_eth_dev_info dev_info;
1230 RTE_ETH_FOREACH_DEV(pi) {
1231 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1234 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1235 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1240 return allowed_min_rxd;
1244 * Check whether the input rxd is valid.
1245 * If the input rxd is not greater than the maximum number
1246 * of RXDs of any Rx queue and is not less than the
1247 * minimal number of RXDs of any Rx queue, it is valid.
1248 * If valid, return 0, else return -1.
1251 check_nb_rxd(queueid_t rxd)
1253 uint16_t allowed_max_rxd;
1254 uint16_t allowed_min_rxd;
1257 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1258 if (rxd > allowed_max_rxd) {
1259 printf("Fail: input rxd (%u) can't be greater "
1260 "than max_rxds (%u) of port %u\n",
1267 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1268 if (rxd < allowed_min_rxd) {
1269 printf("Fail: input rxd (%u) can't be less "
1270 "than min_rxds (%u) of port %u\n",
1281 * Get the allowed maximum number of TXDs of every tx queue.
1282 * *pid returns the port id which has the minimal value of
1283 * max_txd among all tx queues.
1286 get_allowed_max_nb_txd(portid_t *pid)
1288 uint16_t allowed_max_txd = UINT16_MAX;
1290 struct rte_eth_dev_info dev_info;
1292 RTE_ETH_FOREACH_DEV(pi) {
1293 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1296 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1297 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1301 return allowed_max_txd;
1305 * Get the allowed minimal number of TXDs of every tx queue.
1306 * *pid returns the port id which has the maximal value of
1307 * min_txd among all tx queues.
1310 get_allowed_min_nb_txd(portid_t *pid)
1312 uint16_t allowed_min_txd = 0;
1314 struct rte_eth_dev_info dev_info;
1316 RTE_ETH_FOREACH_DEV(pi) {
1317 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1320 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1321 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1326 return allowed_min_txd;
1330 * Check whether the input txd is valid.
1331 * If the input txd is not greater than the maximum number
1332 * of TXDs of any Tx queue, it is valid.
1333 * If valid, return 0, else return -1.
1336 check_nb_txd(queueid_t txd)
1338 uint16_t allowed_max_txd;
1339 uint16_t allowed_min_txd;
1342 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1343 if (txd > allowed_max_txd) {
1344 printf("Fail: input txd (%u) can't be greater "
1345 "than max_txds (%u) of port %u\n",
1352 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1353 if (txd < allowed_min_txd) {
1354 printf("Fail: input txd (%u) can't be less "
1355 "than min_txds (%u) of port %u\n",
1366 * Get the allowed maximum number of hairpin queues.
1367 * *pid returns the port id which has the minimal value of
1368 * max_hairpin_queues among all ports.
1371 get_allowed_max_nb_hairpinq(portid_t *pid)
1373 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1375 struct rte_eth_hairpin_cap cap;
1377 RTE_ETH_FOREACH_DEV(pi) {
1378 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1382 if (cap.max_nb_queues < allowed_max_hairpinq) {
1383 allowed_max_hairpinq = cap.max_nb_queues;
1387 return allowed_max_hairpinq;
1391 * Check whether the input hairpinq is valid.
1392 * If the input hairpinq is not greater than the maximum number
1393 * of hairpin queues of any port, it is valid.
1394 * If valid, return 0, else return -1.
1397 check_nb_hairpinq(queueid_t hairpinq)
1399 queueid_t allowed_max_hairpinq;
1402 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1403 if (hairpinq > allowed_max_hairpinq) {
1404 printf("Fail: input hairpin (%u) can't be greater "
1405 "than max_hairpin_queues (%u) of port %u\n",
1406 hairpinq, allowed_max_hairpinq, pid);
1416 struct rte_port *port;
1417 struct rte_mempool *mbp;
1418 unsigned int nb_mbuf_per_pool;
1420 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1421 struct rte_gro_param gro_param;
1424 uint16_t overhead_len;
1429 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1431 /* Configuration of logical cores. */
1432 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1433 sizeof(struct fwd_lcore *) * nb_lcores,
1434 RTE_CACHE_LINE_SIZE);
1435 if (fwd_lcores == NULL) {
1436 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1437 "failed\n", nb_lcores);
1439 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1440 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1441 sizeof(struct fwd_lcore),
1442 RTE_CACHE_LINE_SIZE);
1443 if (fwd_lcores[lc_id] == NULL) {
1444 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1447 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1450 RTE_ETH_FOREACH_DEV(pid) {
1452 /* Apply default TxRx configuration for all ports */
1453 port->dev_conf.txmode = tx_mode;
1454 port->dev_conf.rxmode = rx_mode;
1456 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1458 rte_exit(EXIT_FAILURE,
1459 "rte_eth_dev_info_get() failed\n");
1461 /* Update the max_rx_pkt_len to have MTU as RTE_ETHER_MTU */
1462 if (port->dev_info.max_rx_pktlen && port->dev_info.max_mtu)
1463 overhead_len = port->dev_info.max_rx_pktlen -
1464 port->dev_info.max_mtu;
1466 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1468 port->dev_conf.rxmode.max_rx_pkt_len =
1469 RTE_ETHER_MTU + overhead_len;
1472 * This is a workaround to avoid resizing the max rx packet len.
1473 * Ethdev assumes jumbo frame size must be greater than
1474 * RTE_ETHER_MAX_LEN, and will resize 'max_rx_pkt_len' to
1475 * default value when it is greater than RTE_ETHER_MAX_LEN
1478 if (port->dev_conf.rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN) {
1479 port->dev_conf.rxmode.offloads |=
1480 DEV_RX_OFFLOAD_JUMBO_FRAME;
1483 if (!(port->dev_info.tx_offload_capa &
1484 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1485 port->dev_conf.txmode.offloads &=
1486 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1488 if (port_numa[pid] != NUMA_NO_CONFIG)
1489 port_per_socket[port_numa[pid]]++;
1491 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1494 * if socket_id is invalid,
1495 * set to the first available socket.
1497 if (check_socket_id(socket_id) < 0)
1498 socket_id = socket_ids[0];
1499 port_per_socket[socket_id]++;
1503 /* Apply Rx offloads configuration */
1504 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1505 port->rx_conf[k].offloads =
1506 port->dev_conf.rxmode.offloads;
1507 /* Apply Tx offloads configuration */
1508 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1509 port->tx_conf[k].offloads =
1510 port->dev_conf.txmode.offloads;
1512 /* set flag to initialize port/queue */
1513 port->need_reconfig = 1;
1514 port->need_reconfig_queues = 1;
1515 port->tx_metadata = 0;
1517 /* Check for maximum number of segments per MTU. Accordingly
1518 * update the mbuf data size.
1520 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1521 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1522 data_size = rx_mode.max_rx_pkt_len /
1523 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1525 if ((data_size + RTE_PKTMBUF_HEADROOM) >
1526 mbuf_data_size[0]) {
1527 mbuf_data_size[0] = data_size +
1528 RTE_PKTMBUF_HEADROOM;
1535 TESTPMD_LOG(WARNING,
1536 "Configured mbuf size of the first segment %hu\n",
1539 * Create pools of mbuf.
1540 * If NUMA support is disabled, create a single pool of mbuf in
1541 * socket 0 memory by default.
1542 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1544 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1545 * nb_txd can be configured at run time.
1547 if (param_total_num_mbufs)
1548 nb_mbuf_per_pool = param_total_num_mbufs;
1550 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1551 (nb_lcores * mb_mempool_cache) +
1552 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1553 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
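/* With the usual testpmd.h defaults (an assumption; the actual
 * constants live in testpmd.h) this sizes each pool for the
 * worst-case RX/TX descriptor rings, the per-lcore mempool caches
 * and one packet burst, scaled by the maximum number of ports. */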
1559 for (i = 0; i < num_sockets; i++)
1560 for (j = 0; j < mbuf_data_size_n; j++)
1561 mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1562 mbuf_pool_create(mbuf_data_size[j],
1568 for (i = 0; i < mbuf_data_size_n; i++)
1569 mempools[i] = mbuf_pool_create
1572 socket_num == UMA_NO_CONFIG ?
1578 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1579 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1581 * Records which Mbuf pool to use by each logical core, if needed.
1583 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1584 mbp = mbuf_pool_find(
1585 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1588 mbp = mbuf_pool_find(0, 0);
1589 fwd_lcores[lc_id]->mbp = mbp;
1590 /* initialize GSO context */
1591 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1592 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1593 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1594 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1596 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1599 /* Configuration of packet forwarding streams. */
1600 if (init_fwd_streams() < 0)
1601 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1605 /* create a gro context for each lcore */
1606 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1607 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1608 gro_param.max_item_per_flow = MAX_PKT_BURST;
1609 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1610 gro_param.socket_id = rte_lcore_to_socket_id(
1611 fwd_lcores_cpuids[lc_id]);
1612 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1613 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1614 rte_exit(EXIT_FAILURE,
1615 "rte_gro_ctx_create() failed\n");
1622 reconfig(portid_t new_port_id, unsigned socket_id)
1624 struct rte_port *port;
1627 /* Reconfiguration of Ethernet ports. */
1628 port = &ports[new_port_id];
1630 ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
1634 /* set flag to initialize port/queue */
1635 port->need_reconfig = 1;
1636 port->need_reconfig_queues = 1;
1637 port->socket_id = socket_id;
1644 init_fwd_streams(void)
1647 struct rte_port *port;
1648 streamid_t sm_id, nb_fwd_streams_new;
1651 /* set socket id according to numa or not */
1652 RTE_ETH_FOREACH_DEV(pid) {
1654 if (nb_rxq > port->dev_info.max_rx_queues) {
1655 printf("Fail: nb_rxq(%d) is greater than "
1656 "max_rx_queues(%d)\n", nb_rxq,
1657 port->dev_info.max_rx_queues);
1660 if (nb_txq > port->dev_info.max_tx_queues) {
1661 printf("Fail: nb_txq(%d) is greater than "
1662 "max_tx_queues(%d)\n", nb_txq,
1663 port->dev_info.max_tx_queues);
1667 if (port_numa[pid] != NUMA_NO_CONFIG)
1668 port->socket_id = port_numa[pid];
1670 port->socket_id = rte_eth_dev_socket_id(pid);
1673 * if socket_id is invalid,
1674 * set to the first available socket.
1676 if (check_socket_id(port->socket_id) < 0)
1677 port->socket_id = socket_ids[0];
1681 if (socket_num == UMA_NO_CONFIG)
1682 port->socket_id = 0;
1684 port->socket_id = socket_num;
1688 q = RTE_MAX(nb_rxq, nb_txq);
1690 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1693 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1694 if (nb_fwd_streams_new == nb_fwd_streams)
1697 if (fwd_streams != NULL) {
1698 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1699 if (fwd_streams[sm_id] == NULL)
1701 rte_free(fwd_streams[sm_id]);
1702 fwd_streams[sm_id] = NULL;
1704 rte_free(fwd_streams);
1709 nb_fwd_streams = nb_fwd_streams_new;
1710 if (nb_fwd_streams) {
1711 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1712 sizeof(struct fwd_stream *) * nb_fwd_streams,
1713 RTE_CACHE_LINE_SIZE);
1714 if (fwd_streams == NULL)
1715 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1716 " (struct fwd_stream *)) failed\n",
1719 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1720 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1721 " struct fwd_stream", sizeof(struct fwd_stream),
1722 RTE_CACHE_LINE_SIZE);
1723 if (fwd_streams[sm_id] == NULL)
1724 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1725 "(struct fwd_stream) failed\n");
1733 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1735 uint64_t total_burst, sburst;
1737 uint64_t burst_stats[4];
1738 uint16_t pktnb_stats[4];
1740 int burst_percent[4], sburstp;
1744 * First compute the total number of packet bursts and the
1745 * two highest numbers of bursts of the same number of packets.
1747 memset(&burst_stats, 0x0, sizeof(burst_stats));
1748 memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1750 /* Show stats for 0 burst size always */
1751 total_burst = pbs->pkt_burst_spread[0];
1752 burst_stats[0] = pbs->pkt_burst_spread[0];
1755 /* Find the next 2 burst sizes with highest occurrences. */
1756 for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1757 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1762 total_burst += nb_burst;
1764 if (nb_burst > burst_stats[1]) {
1765 burst_stats[2] = burst_stats[1];
1766 pktnb_stats[2] = pktnb_stats[1];
1767 burst_stats[1] = nb_burst;
1768 pktnb_stats[1] = nb_pkt;
1769 } else if (nb_burst > burst_stats[2]) {
1770 burst_stats[2] = nb_burst;
1771 pktnb_stats[2] = nb_pkt;
1774 if (total_burst == 0)
1777 printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1778 for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1780 printf("%d%% of other]\n", 100 - sburstp);
1784 sburst += burst_stats[i];
1785 if (sburst == total_burst) {
1786 printf("%d%% of %d pkts]\n",
1787 100 - sburstp, (int) pktnb_stats[i]);
1792 (double)burst_stats[i] / total_burst * 100;
1793 printf("%d%% of %d pkts + ",
1794 burst_percent[i], (int) pktnb_stats[i]);
1795 sburstp += burst_percent[i];
1800 fwd_stream_stats_display(streamid_t stream_id)
1802 struct fwd_stream *fs;
1803 static const char *fwd_top_stats_border = "-------";
1805 fs = fwd_streams[stream_id];
1806 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1807 (fs->fwd_dropped == 0))
1809 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1810 "TX Port=%2d/Queue=%2d %s\n",
1811 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1812 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1813 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1814 " TX-dropped: %-14"PRIu64,
1815 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1817 /* if in checksum forwarding mode */
1818 if (cur_fwd_eng == &csum_fwd_engine) {
1819 printf(" RX- bad IP checksum: %-14"PRIu64
1820 " RX- bad L4 checksum: %-14"PRIu64
1821 " RX- bad outer L4 checksum: %-14"PRIu64"\n",
1822 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1823 fs->rx_bad_outer_l4_csum);
1828 if (record_burst_stats) {
1829 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1830 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1835 fwd_stats_display(void)
1837 static const char *fwd_stats_border = "----------------------";
1838 static const char *acc_stats_border = "+++++++++++++++";
1840 struct fwd_stream *rx_stream;
1841 struct fwd_stream *tx_stream;
1842 uint64_t tx_dropped;
1843 uint64_t rx_bad_ip_csum;
1844 uint64_t rx_bad_l4_csum;
1845 uint64_t rx_bad_outer_l4_csum;
1846 } ports_stats[RTE_MAX_ETHPORTS];
1847 uint64_t total_rx_dropped = 0;
1848 uint64_t total_tx_dropped = 0;
1849 uint64_t total_rx_nombuf = 0;
1850 struct rte_eth_stats stats;
1851 uint64_t fwd_cycles = 0;
1852 uint64_t total_recv = 0;
1853 uint64_t total_xmit = 0;
1854 struct rte_port *port;
1859 memset(ports_stats, 0, sizeof(ports_stats));
1861 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1862 struct fwd_stream *fs = fwd_streams[sm_id];
1864 if (cur_fwd_config.nb_fwd_streams >
1865 cur_fwd_config.nb_fwd_ports) {
1866 fwd_stream_stats_display(sm_id);
1868 ports_stats[fs->tx_port].tx_stream = fs;
1869 ports_stats[fs->rx_port].rx_stream = fs;
1872 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1874 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1875 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1876 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1877 fs->rx_bad_outer_l4_csum;
1879 if (record_core_cycles)
1880 fwd_cycles += fs->core_cycles;
1882 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1885 pt_id = fwd_ports_ids[i];
1886 port = &ports[pt_id];
1888 rte_eth_stats_get(pt_id, &stats);
1889 stats.ipackets -= port->stats.ipackets;
1890 stats.opackets -= port->stats.opackets;
1891 stats.ibytes -= port->stats.ibytes;
1892 stats.obytes -= port->stats.obytes;
1893 stats.imissed -= port->stats.imissed;
1894 stats.oerrors -= port->stats.oerrors;
1895 stats.rx_nombuf -= port->stats.rx_nombuf;
1897 total_recv += stats.ipackets;
1898 total_xmit += stats.opackets;
1899 total_rx_dropped += stats.imissed;
1900 total_tx_dropped += ports_stats[pt_id].tx_dropped;
1901 total_tx_dropped += stats.oerrors;
1902 total_rx_nombuf += stats.rx_nombuf;
1904 printf("\n %s Forward statistics for port %-2d %s\n",
1905 fwd_stats_border, pt_id, fwd_stats_border);
1907 if (!port->rx_queue_stats_mapping_enabled &&
1908 !port->tx_queue_stats_mapping_enabled) {
1909 printf(" RX-packets: %-14"PRIu64
1910 " RX-dropped: %-14"PRIu64
1911 "RX-total: %-"PRIu64"\n",
1912 stats.ipackets, stats.imissed,
1913 stats.ipackets + stats.imissed);
1915 if (cur_fwd_eng == &csum_fwd_engine)
1916 printf(" Bad-ipcsum: %-14"PRIu64
1917 " Bad-l4csum: %-14"PRIu64
1918 "Bad-outer-l4csum: %-14"PRIu64"\n",
1919 ports_stats[pt_id].rx_bad_ip_csum,
1920 ports_stats[pt_id].rx_bad_l4_csum,
1921 ports_stats[pt_id].rx_bad_outer_l4_csum);
1922 if (stats.ierrors + stats.rx_nombuf > 0) {
1923 printf(" RX-error: %-"PRIu64"\n",
1925 printf(" RX-nombufs: %-14"PRIu64"\n",
1929 printf(" TX-packets: %-14"PRIu64
1930 " TX-dropped: %-14"PRIu64
1931 "TX-total: %-"PRIu64"\n",
1932 stats.opackets, ports_stats[pt_id].tx_dropped,
1933 stats.opackets + ports_stats[pt_id].tx_dropped);
1935 printf(" RX-packets: %14"PRIu64
1936 " RX-dropped:%14"PRIu64
1937 " RX-total:%14"PRIu64"\n",
1938 stats.ipackets, stats.imissed,
1939 stats.ipackets + stats.imissed);
1941 if (cur_fwd_eng == &csum_fwd_engine)
1942 printf(" Bad-ipcsum:%14"PRIu64
1943 " Bad-l4csum:%14"PRIu64
1944 " Bad-outer-l4csum: %-14"PRIu64"\n",
1945 ports_stats[pt_id].rx_bad_ip_csum,
1946 ports_stats[pt_id].rx_bad_l4_csum,
1947 ports_stats[pt_id].rx_bad_outer_l4_csum);
1948 if ((stats.ierrors + stats.rx_nombuf) > 0) {
1949 printf(" RX-error:%"PRIu64"\n", stats.ierrors);
1950 printf(" RX-nombufs: %14"PRIu64"\n",
1954 printf(" TX-packets: %14"PRIu64
1955 " TX-dropped:%14"PRIu64
1956 " TX-total:%14"PRIu64"\n",
1957 stats.opackets, ports_stats[pt_id].tx_dropped,
1958 stats.opackets + ports_stats[pt_id].tx_dropped);
1961 if (record_burst_stats) {
1962 if (ports_stats[pt_id].rx_stream)
1963 pkt_burst_stats_display("RX",
1964 &ports_stats[pt_id].rx_stream->rx_burst_stats);
1965 if (ports_stats[pt_id].tx_stream)
1966 pkt_burst_stats_display("TX",
1967 &ports_stats[pt_id].tx_stream->tx_burst_stats);
1970 if (port->rx_queue_stats_mapping_enabled) {
1972 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1973 printf(" Stats reg %2d RX-packets:%14"PRIu64
1974 " RX-errors:%14"PRIu64
1975 " RX-bytes:%14"PRIu64"\n",
1976 j, stats.q_ipackets[j],
1977 stats.q_errors[j], stats.q_ibytes[j]);
1981 if (port->tx_queue_stats_mapping_enabled) {
1982 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1983 printf(" Stats reg %2d TX-packets:%14"PRIu64
1986 j, stats.q_opackets[j],
1991 printf(" %s--------------------------------%s\n",
1992 fwd_stats_border, fwd_stats_border);
1995 printf("\n %s Accumulated forward statistics for all ports"
1997 acc_stats_border, acc_stats_border);
1998 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
2000 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
2002 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
2003 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
2004 if (total_rx_nombuf > 0)
2005 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
2006 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
2008 acc_stats_border, acc_stats_border);
2009 if (record_core_cycles) {
2010 #define CYC_PER_MHZ 1E6
2011 if (total_recv > 0 || total_xmit > 0) {
2012 uint64_t total_pkts = 0;
2013 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
2014 strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
2015 total_pkts = total_xmit;
2017 total_pkts = total_recv;
2019 printf("\n CPU cycles/packet=%.2F (total cycles="
2020 "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
2022 (double) fwd_cycles / total_pkts,
2023 fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
2024 (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
2030 fwd_stats_reset(void)
2036 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2037 pt_id = fwd_ports_ids[i];
2038 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2040 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2041 struct fwd_stream *fs = fwd_streams[sm_id];
2045 fs->fwd_dropped = 0;
2046 fs->rx_bad_ip_csum = 0;
2047 fs->rx_bad_l4_csum = 0;
2048 fs->rx_bad_outer_l4_csum = 0;
2050 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
2051 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2052 fs->core_cycles = 0;
2057 flush_fwd_rx_queues(void)
2059 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2066 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2067 uint64_t timer_period;
2069 /* convert to number of cycles */
2070 timer_period = rte_get_timer_hz(); /* 1 second timeout */
2072 for (j = 0; j < 2; j++) {
2073 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2074 for (rxq = 0; rxq < nb_rxq; rxq++) {
2075 port_id = fwd_ports_ids[rxp];
2077 * testpmd can get stuck in the do-while loop below
2078 * if rte_eth_rx_burst() always returns a nonzero
2079 * number of packets, so a timer is added to exit
2080 * the loop after the 1-second timer expires.
2082 prev_tsc = rte_rdtsc();
2084 nb_rx = rte_eth_rx_burst(port_id, rxq,
2085 pkts_burst, MAX_PKT_BURST);
2086 for (i = 0; i < nb_rx; i++)
2087 rte_pktmbuf_free(pkts_burst[i]);
2089 cur_tsc = rte_rdtsc();
2090 diff_tsc = cur_tsc - prev_tsc;
2091 timer_tsc += diff_tsc;
2092 } while ((nb_rx > 0) &&
2093 (timer_tsc < timer_period));
2097 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2102 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2104 struct fwd_stream **fsm;
2107 #ifdef RTE_LIB_BITRATESTATS
2108 uint64_t tics_per_1sec;
2109 uint64_t tics_datum;
2110 uint64_t tics_current;
2111 uint16_t i, cnt_ports;
2113 cnt_ports = nb_ports;
2114 tics_datum = rte_rdtsc();
2115 tics_per_1sec = rte_get_timer_hz();
2117 fsm = &fwd_streams[fc->stream_idx];
2118 nb_fs = fc->stream_nb;
2120 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2121 (*pkt_fwd)(fsm[sm_id]);
2122 #ifdef RTE_LIB_BITRATESTATS
2123 if (bitrate_enabled != 0 &&
2124 bitrate_lcore_id == rte_lcore_id()) {
2125 tics_current = rte_rdtsc();
2126 if (tics_current - tics_datum >= tics_per_1sec) {
2127 /* Periodic bitrate calculation */
2128 for (i = 0; i < cnt_ports; i++)
2129 rte_stats_bitrate_calc(bitrate_data,
2131 tics_datum = tics_current;
2135 #ifdef RTE_LIB_LATENCYSTATS
2136 if (latencystats_enabled != 0 &&
2137 latencystats_lcore_id == rte_lcore_id())
2138 rte_latencystats_update();
2141 } while (!fc->stopped);
2145 start_pkt_forward_on_core(void *fwd_arg)
2147 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2148 cur_fwd_config.fwd_eng->packet_fwd);
2153 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2154 * Used to start communication flows in network loopback test configurations.
2157 run_one_txonly_burst_on_core(void *fwd_arg)
2159 struct fwd_lcore *fwd_lc;
2160 struct fwd_lcore tmp_lcore;
2162 fwd_lc = (struct fwd_lcore *) fwd_arg;
2163 tmp_lcore = *fwd_lc;
2164 tmp_lcore.stopped = 1;
2165 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2170 * Launch packet forwarding:
2171 * - Setup per-port forwarding context.
2172 * - launch logical cores with their forwarding configuration.
2175 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2177 port_fwd_begin_t port_fwd_begin;
2182 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2183 if (port_fwd_begin != NULL) {
2184 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2185 (*port_fwd_begin)(fwd_ports_ids[i]);
2187 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2188 lc_id = fwd_lcores_cpuids[i];
2189 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2190 fwd_lcores[i]->stopped = 0;
2191 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2192 fwd_lcores[i], lc_id);
2194 printf("launch lcore %u failed - diag=%d\n",
2201 * Launch packet forwarding configuration.
2204 start_packet_forwarding(int with_tx_first)
2206 port_fwd_begin_t port_fwd_begin;
2207 port_fwd_end_t port_fwd_end;
2208 struct rte_port *port;
2212 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2213 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
2215 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2216 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
2218 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2219 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2220 (!nb_rxq || !nb_txq))
2221 rte_exit(EXIT_FAILURE,
2222 "Either rxq or txq are 0, cannot use %s fwd mode\n",
2223 cur_fwd_eng->fwd_mode_name);
2225 if (all_ports_started() == 0) {
2226 printf("Not all ports were started\n");
2229 if (test_done == 0) {
2230 printf("Packet forwarding already started\n");
2236 for (i = 0; i < nb_fwd_ports; i++) {
2237 pt_id = fwd_ports_ids[i];
2238 port = &ports[pt_id];
2239 if (!port->dcb_flag) {
2240 printf("In DCB mode, all forwarding ports must "
2241 "be configured in this mode.\n");
2245 if (nb_fwd_lcores == 1) {
2246 printf("In DCB mode,the nb forwarding cores "
2247 "should be larger than 1.\n");
2256 flush_fwd_rx_queues();
2258 pkt_fwd_config_display(&cur_fwd_config);
2259 rxtx_config_display();
2262 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2263 pt_id = fwd_ports_ids[i];
2264 port = &ports[pt_id];
2265 map_port_queue_stats_mapping_registers(pt_id, port);
2267 if (with_tx_first) {
2268 port_fwd_begin = tx_only_engine.port_fwd_begin;
2269 if (port_fwd_begin != NULL) {
2270 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2271 (*port_fwd_begin)(fwd_ports_ids[i]);
2273 while (with_tx_first--) {
2274 launch_packet_forwarding(
2275 run_one_txonly_burst_on_core);
2276 rte_eal_mp_wait_lcore();
2278 port_fwd_end = tx_only_engine.port_fwd_end;
2279 if (port_fwd_end != NULL) {
2280 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2281 (*port_fwd_end)(fwd_ports_ids[i]);
2284 launch_packet_forwarding(start_pkt_forward_on_core);
2288 stop_packet_forwarding(void)
2290 port_fwd_end_t port_fwd_end;
2296 printf("Packet forwarding not started\n");
2299 printf("Telling cores to stop...");
2300 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2301 fwd_lcores[lc_id]->stopped = 1;
2302 printf("\nWaiting for lcores to finish...\n");
2303 rte_eal_mp_wait_lcore();
2304 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2305 if (port_fwd_end != NULL) {
2306 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2307 pt_id = fwd_ports_ids[i];
2308 (*port_fwd_end)(pt_id);
2312 fwd_stats_display();
2314 printf("\nDone.\n");
2319 dev_set_link_up(portid_t pid)
2321 if (rte_eth_dev_set_link_up(pid) < 0)
2322 printf("\nSet link up fail.\n");
2326 dev_set_link_down(portid_t pid)
2328 if (rte_eth_dev_set_link_down(pid) < 0)
2329 printf("\nSet link down fail.\n");
2333 all_ports_started(void)
2336 struct rte_port *port;
2338 RTE_ETH_FOREACH_DEV(pi) {
2340 /* Check if there is a port which is not started */
2341 if ((port->port_status != RTE_PORT_STARTED) &&
2342 (port->slave_flag == 0))
2346 /* No port is stopped; all are started */
2351 port_is_stopped(portid_t port_id)
2353 struct rte_port *port = &ports[port_id];
2355 if ((port->port_status != RTE_PORT_STOPPED) &&
2356 (port->slave_flag == 0))
2362 all_ports_stopped(void)
2366 RTE_ETH_FOREACH_DEV(pi) {
2367 if (!port_is_stopped(pi))
2375 port_is_started(portid_t port_id)
2377 if (port_id_is_invalid(port_id, ENABLED_WARN))
2380 if (ports[port_id].port_status != RTE_PORT_STARTED)
2386 /* Configure the Rx and Tx hairpin queues for the selected port. */
2388 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2391 struct rte_eth_hairpin_conf hairpin_conf = {
2396 struct rte_port *port = &ports[pi];
2397 uint16_t peer_rx_port = pi;
2398 uint16_t peer_tx_port = pi;
2399 uint32_t manual = 1;
2400 uint32_t tx_exp = hairpin_mode & 0x10;
2402 if (!(hairpin_mode & 0xf)) {
2406 } else if (hairpin_mode & 0x1) {
2407 peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2408 RTE_ETH_DEV_NO_OWNER);
2409 if (peer_tx_port >= RTE_MAX_ETHPORTS)
2410 peer_tx_port = rte_eth_find_next_owned_by(0,
2411 RTE_ETH_DEV_NO_OWNER);
2412 if (p_pi != RTE_MAX_ETHPORTS) {
2413 peer_rx_port = p_pi;
2417 /* Last port will be the peer RX port of the first. */
2418 RTE_ETH_FOREACH_DEV(next_pi)
2419 peer_rx_port = next_pi;
2422 } else if (hairpin_mode & 0x2) {
2424 peer_rx_port = p_pi;
2426 peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2427 RTE_ETH_DEV_NO_OWNER);
2428 if (peer_rx_port >= RTE_MAX_ETHPORTS)
2431 peer_tx_port = peer_rx_port;
2435 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2436 hairpin_conf.peers[0].port = peer_rx_port;
2437 hairpin_conf.peers[0].queue = i + nb_rxq;
2438 hairpin_conf.manual_bind = !!manual;
2439 hairpin_conf.tx_explicit = !!tx_exp;
2440 diag = rte_eth_tx_hairpin_queue_setup
2441 (pi, qi, nb_txd, &hairpin_conf);
2446 /* Failed to set up Tx hairpin queue, return */
2447 if (rte_atomic16_cmpset(&(port->port_status),
2449 RTE_PORT_STOPPED) == 0)
2450 printf("Port %d cannot be set back "
2451 "to stopped\n", pi);
2452 printf("Failed to configure port %d hairpin "
2454 /* try to reconfigure queues next time */
2455 port->need_reconfig_queues = 1;
2458 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2459 hairpin_conf.peers[0].port = peer_tx_port;
2460 hairpin_conf.peers[0].queue = i + nb_txq;
2461 hairpin_conf.manual_bind = !!manual;
2462 hairpin_conf.tx_explicit = !!tx_exp;
2463 diag = rte_eth_rx_hairpin_queue_setup
2464 (pi, qi, nb_rxd, &hairpin_conf);
2469 /* Failed to set up Rx hairpin queue, return */
2470 if (rte_atomic16_cmpset(&(port->port_status),
2472 RTE_PORT_STOPPED) == 0)
2473 printf("Port %d cannot be set back "
2474 "to stopped\n", pi);
2475 printf("Failed to configure port %d hairpin "
2477 /* try to reconfigure queues next time */
2478 port->need_reconfig_queues = 1;
2484 /* Configure an Rx queue, optionally splitting packets across buffer segments. */
2486 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2487 uint16_t nb_rx_desc, unsigned int socket_id,
2488 struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2490 union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2491 unsigned int i, mp_n;
2494 if (rx_pkt_nb_segs <= 1 ||
2495 (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2496 rx_conf->rx_seg = NULL;
2497 rx_conf->rx_nseg = 0;
2498 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2499 nb_rx_desc, socket_id,
2503 for (i = 0; i < rx_pkt_nb_segs; i++) {
2504 struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2505 struct rte_mempool *mpx;
2507 * Use the last valid pool for segments whose index exceeds the
2508 * number of configured pools (e.g. with two pools, segment 2 reuses pool 1).
2510 mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2511 mpx = mbuf_pool_find(socket_id, mp_n);
2512 /* A segment length of zero means: use the pool's mbuf data buffer size. */
2513 rx_seg->length = rx_pkt_seg_lengths[i] ?
2514 rx_pkt_seg_lengths[i] :
2515 mbuf_data_size[mp_n];
2516 rx_seg->offset = i < rx_pkt_nb_offs ?
2517 rx_pkt_seg_offsets[i] : 0;
2518 rx_seg->mp = mpx ? mpx : mp;
2520 rx_conf->rx_nseg = rx_pkt_nb_segs;
2521 rx_conf->rx_seg = rx_useg;
2522 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2523 socket_id, rx_conf, NULL);
2524 rx_conf->rx_seg = NULL;
2525 rx_conf->rx_nseg = 0;
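/*
 * A minimal, self-contained sketch of the buffer-split path above for
 * a hypothetical two-segment layout (fixed-size header pool plus a
 * payload pool). The function name, pools and sizes are illustrative,
 * not part of testpmd.
 */
static inline int
example_buffer_split_setup(uint16_t port_id, uint16_t queue_id,
			   uint16_t nb_desc, unsigned int socket,
			   struct rte_eth_rxconf *rxc,
			   struct rte_mempool *hdr_mp,
			   struct rte_mempool *pay_mp)
{
	/* The segment array must stay valid for the duration of the call. */
	static union rte_eth_rxseg segs[2];
	int ret;

	segs[0].split.length = 64;	/* first 64 bytes of each packet */
	segs[0].split.offset = 0;
	segs[0].split.mp = hdr_mp;
	segs[1].split.length = 1454;	/* remainder of the packet */
	segs[1].split.offset = 0;
	segs[1].split.mp = pay_mp;
	rxc->offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	rxc->rx_seg = segs;
	rxc->rx_nseg = 2;
	/* The mempool argument is NULL: pools come from the segments. */
	ret = rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket,
				     rxc, NULL);
	rxc->rx_seg = NULL;
	rxc->rx_nseg = 0;
	return ret;
}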
2530 start_port(portid_t pid)
2532 int diag, need_check_link_status = -1;
2534 portid_t p_pi = RTE_MAX_ETHPORTS;
2535 portid_t pl[RTE_MAX_ETHPORTS];
2536 portid_t peer_pl[RTE_MAX_ETHPORTS];
2537 uint16_t cnt_pi = 0;
2538 uint16_t cfg_pi = 0;
2541 struct rte_port *port;
2542 struct rte_ether_addr mac_addr;
2543 struct rte_eth_hairpin_cap cap;
2545 if (port_id_is_invalid(pid, ENABLED_WARN))
2550 RTE_ETH_FOREACH_DEV(pi) {
2551 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2554 need_check_link_status = 0;
2556 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2557 RTE_PORT_HANDLING) == 0) {
2558 printf("Port %d is not stopped\n", pi);
2562 if (port->need_reconfig > 0) {
2563 port->need_reconfig = 0;
2565 if (flow_isolate_all) {
2566 int ret = port_flow_isolate(pi, 1);
2568 printf("Failed to apply isolated"
2569 " mode on port %d\n", pi);
2573 configure_rxtx_dump_callbacks(0);
2574 printf("Configuring Port %d (socket %u)\n", pi,
2576 if (nb_hairpinq > 0 &&
2577 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2578 printf("Port %d doesn't support hairpin "
2582 /* configure port */
2583 diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
2584 nb_txq + nb_hairpinq,
2587 if (rte_atomic16_cmpset(&(port->port_status),
2588 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2589 printf("Port %d cannot be set back "
2590 "to stopped\n", pi);
2591 printf("Failed to configure port %d\n", pi);
2592 /* try to reconfigure port next time */
2593 port->need_reconfig = 1;
2597 if (port->need_reconfig_queues > 0) {
2598 port->need_reconfig_queues = 0;
2599 /* setup tx queues */
2600 for (qi = 0; qi < nb_txq; qi++) {
2601 if ((numa_support) &&
2602 (txring_numa[pi] != NUMA_NO_CONFIG))
2603 diag = rte_eth_tx_queue_setup(pi, qi,
2604 port->nb_tx_desc[qi],
2606 &(port->tx_conf[qi]));
2608 diag = rte_eth_tx_queue_setup(pi, qi,
2609 port->nb_tx_desc[qi],
2611 &(port->tx_conf[qi]));
2616 /* Failed to set up Tx queue, return */
2617 if (rte_atomic16_cmpset(&(port->port_status),
2619 RTE_PORT_STOPPED) == 0)
2620 printf("Port %d cannot be set back "
2621 "to stopped\n", pi);
2622 printf("Failed to configure port %d Tx queues\n",
2624 /* try to reconfigure queues next time */
2625 port->need_reconfig_queues = 1;
2628 for (qi = 0; qi < nb_rxq; qi++) {
2629 /* setup rx queues */
2630 if ((numa_support) &&
2631 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2632 struct rte_mempool * mp =
2634 (rxring_numa[pi], 0);
2636 printf("Failed to set up RX queue: "
2637 "no mempool allocation"
2638 " on socket %d\n",
2643 diag = rx_queue_setup(pi, qi,
2644 port->nb_rx_desc[qi],
2646 &(port->rx_conf[qi]),
2649 struct rte_mempool *mp =
2651 (port->socket_id, 0);
2653 printf("Failed to set up RX queue: "
2654 "no mempool allocation"
2655 " on socket %d\n",
2659 diag = rx_queue_setup(pi, qi,
2660 port->nb_rx_desc[qi],
2662 &(port->rx_conf[qi]),
2668 /* Failed to set up Rx queue, return */
2669 if (rte_atomic16_cmpset(&(port->port_status),
2671 RTE_PORT_STOPPED) == 0)
2672 printf("Port %d cannot be set back "
2673 "to stopped\n", pi);
2674 printf("Failed to configure port %d Rx queues\n",
2676 /* try to reconfigure queues next time */
2677 port->need_reconfig_queues = 1;
2680 /* setup hairpin queues */
2681 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
2684 configure_rxtx_dump_callbacks(verbose_level);
2686 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2690 "Port %d: Failed to disable Ptype parsing\n",
2698 if (rte_eth_dev_start(pi) < 0) {
2699 printf("Failed to start port %d\n", pi);
2701 /* Failed to start port, return */
2702 if (rte_atomic16_cmpset(&(port->port_status),
2703 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2704 printf("Port %d cannot be set back to "
2709 if (rte_atomic16_cmpset(&(port->port_status),
2710 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2711 printf("Port %d cannot be set to started\n", pi);
2713 if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2714 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2715 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2716 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2717 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
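/*
 * Equivalent formatting via the rte_ether.h helper (a sketch):
 *   char buf[RTE_ETHER_ADDR_FMT_SIZE];
 *   rte_ether_format_addr(buf, sizeof(buf), &mac_addr);
 *   printf("Port %d: %s\n", pi, buf);
 */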
2719 /* at least one port started, need checking link status */
2720 need_check_link_status = 1;
2725 if (need_check_link_status == 1 && !no_link_check)
2726 check_all_ports_link_status(RTE_PORT_ALL);
2727 else if (need_check_link_status == 0)
2728 printf("Please stop the ports first\n");
2730 if (hairpin_mode & 0xf) {
2734 /* bind all started hairpin ports */
2735 for (i = 0; i < cfg_pi; i++) {
2737 /* bind current Tx to all peer Rx */
2738 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2739 RTE_MAX_ETHPORTS, 1);
2742 for (j = 0; j < peer_pi; j++) {
2743 if (!port_is_started(peer_pl[j]))
2745 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2747 printf("Error during binding hairpin"
2748 " Tx port %u to %u: %s\n",
2750 rte_strerror(-diag));
2754 /* bind all peer Tx to current Rx */
2755 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2756 RTE_MAX_ETHPORTS, 0);
2759 for (j = 0; j < peer_pi; j++) {
2760 if (!port_is_started(peer_pl[j]))
2762 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
2764 printf("Error during binding hairpin"
2765 " Tx port %u to %u: %s\n",
2767 rte_strerror(-diag));
2779 stop_port(portid_t pid)
2782 struct rte_port *port;
2783 int need_check_link_status = 0;
2784 portid_t peer_pl[RTE_MAX_ETHPORTS];
2792 if (port_id_is_invalid(pid, ENABLED_WARN))
2795 printf("Stopping ports...\n");
2797 RTE_ETH_FOREACH_DEV(pi) {
2798 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2801 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2802 printf("Please remove port %d from forwarding configuration.\n", pi);
2806 if (port_is_bonding_slave(pi)) {
2807 printf("Please remove port %d from bonded device.\n", pi);
2812 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2813 RTE_PORT_HANDLING) == 0)
2816 if (hairpin_mode & 0xf) {
2819 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
2820 /* unbind all peer Tx from current Rx */
2821 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2822 RTE_MAX_ETHPORTS, 0);
2825 for (j = 0; j < peer_pi; j++) {
2826 if (!port_is_started(peer_pl[j]))
2828 rte_eth_hairpin_unbind(peer_pl[j], pi);
2832 if (rte_eth_dev_stop(pi) != 0)
2833 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
2836 if (rte_atomic16_cmpset(&(port->port_status),
2837 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2838 printf("Port %d cannot be set to stopped\n", pi);
2839 need_check_link_status = 1;
2841 if (need_check_link_status && !no_link_check)
2842 check_all_ports_link_status(RTE_PORT_ALL);
2848 remove_invalid_ports_in(portid_t *array, portid_t *total)
2851 portid_t new_total = 0;
2853 for (i = 0; i < *total; i++)
2854 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2855 array[new_total] = array[i];
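/*
 * In-place compaction: valid port ids are copied down to new_total,
 * which trails i; *total is then reduced to the compacted count.
 */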
2862 remove_invalid_ports(void)
2864 remove_invalid_ports_in(ports_ids, &nb_ports);
2865 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2866 nb_cfg_ports = nb_fwd_ports;
2870 close_port(portid_t pid)
2873 struct rte_port *port;
2875 if (port_id_is_invalid(pid, ENABLED_WARN))
2878 printf("Closing ports...\n");
2880 RTE_ETH_FOREACH_DEV(pi) {
2881 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2884 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2885 printf("Please remove port %d from forwarding configuration.\n", pi);
2889 if (port_is_bonding_slave(pi)) {
2890 printf("Please remove port %d from bonded device.\n", pi);
2895 if (rte_atomic16_cmpset(&(port->port_status),
2896 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2897 printf("Port %d is already closed\n", pi);
2901 port_flow_flush(pi);
2902 rte_eth_dev_close(pi);
2905 remove_invalid_ports();
2910 reset_port(portid_t pid)
2914 struct rte_port *port;
2916 if (port_id_is_invalid(pid, ENABLED_WARN))
2919 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2920 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2921 printf("Cannot reset port(s), please stop port(s) first.\n");
2925 printf("Resetting ports...\n");
2927 RTE_ETH_FOREACH_DEV(pi) {
2928 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2931 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2932 printf("Please remove port %d from forwarding "
2933 "configuration.\n", pi);
2937 if (port_is_bonding_slave(pi)) {
2938 printf("Please remove port %d from bonded device.\n",
2943 diag = rte_eth_dev_reset(pi);
2946 port->need_reconfig = 1;
2947 port->need_reconfig_queues = 1;
2949 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2957 attach_port(char *identifier)
2960 struct rte_dev_iterator iterator;
2962 printf("Attaching a new port...\n");
2964 if (identifier == NULL) {
2965 printf("No device identifier specified\n");
2969 if (rte_dev_probe(identifier) < 0) {
2970 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2974 /* first attach mode: event */
2975 if (setup_on_probe_event) {
2976 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2977 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2978 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2979 ports[pi].need_setup != 0)
2980 setup_attached_port(pi);
2984 /* second attach mode: iterator */
2985 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2986 /* setup ports matching the devargs used for probing */
2987 if (port_is_forwarding(pi))
2988 continue; /* port was already attached before */
2989 setup_attached_port(pi);
2994 setup_attached_port(portid_t pi)
2996 unsigned int socket_id;
2999 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
3000 /* if socket_id is invalid, set to the first available socket. */
3001 if (check_socket_id(socket_id) < 0)
3002 socket_id = socket_ids[0];
3003 reconfig(pi, socket_id);
3004 ret = rte_eth_promiscuous_enable(pi);
3006 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3007 pi, rte_strerror(-ret));
3009 ports_ids[nb_ports++] = pi;
3010 fwd_ports_ids[nb_fwd_ports++] = pi;
3011 nb_cfg_ports = nb_fwd_ports;
3012 ports[pi].need_setup = 0;
3013 ports[pi].port_status = RTE_PORT_STOPPED;
3015 printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
3020 detach_device(struct rte_device *dev)
3025 printf("Device already removed\n");
3029 printf("Removing a device...\n");
3031 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3032 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3033 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3034 printf("Port %u not stopped\n", sibling);
3037 port_flow_flush(sibling);
3041 if (rte_dev_remove(dev) < 0) {
3042 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3045 remove_invalid_ports();
3047 printf("Device is detached\n");
3048 printf("Total number of ports is now %d\n", nb_ports);
3054 detach_port_device(portid_t port_id)
3056 if (port_id_is_invalid(port_id, ENABLED_WARN))
3059 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3060 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3061 printf("Port not stopped\n");
3064 printf("Port was not closed\n");
3067 detach_device(rte_eth_devices[port_id].device);
3071 detach_devargs(char *identifier)
3073 struct rte_dev_iterator iterator;
3074 struct rte_devargs da;
3077 printf("Removing a device...\n");
3079 memset(&da, 0, sizeof(da));
3080 if (rte_devargs_parsef(&da, "%s", identifier)) {
3081 printf("cannot parse identifier\n");
3087 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
3088 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3089 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3090 printf("Port %u not stopped\n", port_id);
3091 rte_eth_iterator_cleanup(&iterator);
3094 port_flow_flush(port_id);
3098 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3099 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3100 da.name, da.bus->name);
3104 remove_invalid_ports();
3106 printf("Device %s is detached\n", identifier);
3107 printf("Total number of ports is now %d\n", nb_ports);
3119 stop_packet_forwarding();
3121 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3123 if (mp_alloc_type == MP_ALLOC_ANON)
3124 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3128 if (ports != NULL) {
3130 RTE_ETH_FOREACH_DEV(pt_id) {
3131 printf("\nStopping port %d...\n", pt_id);
3135 RTE_ETH_FOREACH_DEV(pt_id) {
3136 printf("\nShutting down port %d...\n", pt_id);
3143 ret = rte_dev_event_monitor_stop();
3146 "fail to stop device event monitor.");
3150 ret = rte_dev_event_callback_unregister(NULL,
3151 dev_event_callback, NULL);
3154 "fail to unregister device event callback.\n");
3158 ret = rte_dev_hotplug_handle_disable();
3161 "fail to disable hotplug handling.\n");
3165 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3167 rte_mempool_free(mempools[i]);
3170 printf("\nBye...\n");
3173 typedef void (*cmd_func_t)(void);
3174 struct pmd_test_command {
3175 const char *cmd_name;
3176 cmd_func_t cmd_func;
3179 /* Check the link status of all ports for up to 9 s, and print the final status. */
3181 check_all_ports_link_status(uint32_t port_mask)
3183 #define CHECK_INTERVAL 100 /* 100ms */
3184 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3186 uint8_t count, all_ports_up, print_flag = 0;
3187 struct rte_eth_link link;
3189 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3191 printf("Checking link statuses...\n");
3193 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3195 RTE_ETH_FOREACH_DEV(portid) {
3196 if ((port_mask & (1 << portid)) == 0)
3198 memset(&link, 0, sizeof(link));
3199 ret = rte_eth_link_get_nowait(portid, &link);
3202 if (print_flag == 1)
3203 printf("Port %u link get failed: %s\n",
3204 portid, rte_strerror(-ret));
3207 /* print link status if flag set */
3208 if (print_flag == 1) {
3209 rte_eth_link_to_str(link_status,
3210 sizeof(link_status), &link);
3211 printf("Port %d %s\n", portid, link_status);
3214 /* clear all_ports_up flag if any link down */
3215 if (link.link_status == ETH_LINK_DOWN) {
3220 /* after finally printing all link status, get out */
3221 if (print_flag == 1)
3224 if (all_ports_up == 0) {
3226 rte_delay_ms(CHECK_INTERVAL);
3229 /* set the print_flag if all ports up or timeout */
3230 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3240 rmv_port_callback(void *arg)
3242 int need_to_start = 0;
3243 int org_no_link_check = no_link_check;
3244 portid_t port_id = (intptr_t)arg;
3245 struct rte_device *dev;
3247 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3249 if (!test_done && port_is_forwarding(port_id)) {
3251 stop_packet_forwarding();
3255 no_link_check = org_no_link_check;
3257 /* Save rte_device pointer before closing ethdev port */
3258 dev = rte_eth_devices[port_id].device;
3259 close_port(port_id);
3260 detach_device(dev); /* might be already removed or have more ports */
3263 start_packet_forwarding(0);
3266 /* This function is used by the interrupt thread */
3268 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3271 RTE_SET_USED(param);
3272 RTE_SET_USED(ret_param);
3274 if (type >= RTE_ETH_EVENT_MAX) {
3275 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3276 port_id, __func__, type);
3278 } else if (event_print_mask & (UINT32_C(1) << type)) {
3279 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3280 eth_event_desc[type]);
3285 case RTE_ETH_EVENT_NEW:
3286 ports[port_id].need_setup = 1;
3287 ports[port_id].port_status = RTE_PORT_HANDLING;
3289 case RTE_ETH_EVENT_INTR_RMV:
3290 if (port_id_is_invalid(port_id, DISABLED_WARN))
3292 if (rte_eal_alarm_set(100000,
3293 rmv_port_callback, (void *)(intptr_t)port_id))
3294 fprintf(stderr, "Could not set up deferred device removal\n");
3296 case RTE_ETH_EVENT_DESTROY:
3297 ports[port_id].port_status = RTE_PORT_CLOSED;
3298 printf("Port %u is closed\n", port_id);
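/*
 * event_print_mask, tested above, is a bitmap indexed by event type
 * and normally set via --print-event/--mask-event. A sketch of a mask
 * that prints only link-state-change and removal events (the helper
 * name is hypothetical):
 */
static inline uint32_t
example_lsc_rmv_print_mask(void)
{
	return (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
	       (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
}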
3307 register_eth_event_callback(void)
3310 enum rte_eth_event_type event;
3312 for (event = RTE_ETH_EVENT_UNKNOWN;
3313 event < RTE_ETH_EVENT_MAX; event++) {
3314 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3319 TESTPMD_LOG(ERR, "Failed to register callback for "
3320 "%s event\n", eth_event_desc[event]);
3328 /* This function is used by the interrupt thread */
3330 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3331 __rte_unused void *arg)
3336 if (type >= RTE_DEV_EVENT_MAX) {
3337 fprintf(stderr, "%s called upon invalid event %d\n",
3343 case RTE_DEV_EVENT_REMOVE:
3344 RTE_LOG(DEBUG, EAL, "Device %s has been removed!\n",
3346 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3348 RTE_LOG(ERR, EAL, "cannot get port for device %s!\n",
3353 * Because the user's callback is invoked from the EAL interrupt
3354 * callback, the interrupt callback must finish before it can be
3355 * unregistered when the device is detached. So return from this
3356 * callback quickly and detach the device via a deferred removal
3357 * (alarm) instead. This is a workaround; once device detaching
3358 * moves into the EAL, the deferred removal can be dropped.
3362 rmv_port_callback, (void *)(intptr_t)port_id))
3364 "Could not set up deferred device removal\n");
3366 case RTE_DEV_EVENT_ADD:
3367 RTE_LOG(ERR, EAL, "Device %s has been added!\n",
3369 /* TODO: once kernel driver binding is finished,
3370 * attach the port.
3379 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3383 uint8_t mapping_found = 0;
3385 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3386 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3387 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
3388 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
3389 tx_queue_stats_mappings[i].queue_id,
3390 tx_queue_stats_mappings[i].stats_counter_id);
3397 port->tx_queue_stats_mapping_enabled = 1;
3402 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3406 uint8_t mapping_found = 0;
3408 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3409 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3410 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
3411 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
3412 rx_queue_stats_mappings[i].queue_id,
3413 rx_queue_stats_mappings[i].stats_counter_id);
3420 port->rx_queue_stats_mapping_enabled = 1;
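/*
 * The mapping tables walked above are filled from the
 * --tx-queue-stats-mapping / --rx-queue-stats-mapping options; each
 * entry ties (port_id, queue_id) to a stats_counter_id register.
 * E.g. (port 0, queue 2) -> counter 5 makes Tx queue 2 of port 0
 * accumulate into per-queue stats register 5 (a sketch based on the
 * fields used above).
 */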
3425 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
3429 diag = set_tx_queue_stats_mapping_registers(pi, port);
3431 if (diag == -ENOTSUP) {
3432 port->tx_queue_stats_mapping_enabled = 0;
3433 printf("TX queue stats mapping not supported for port id=%d\n", pi);
3436 rte_exit(EXIT_FAILURE,
3437 "set_tx_queue_stats_mapping_registers "
3438 "failed for port id=%d diag=%d\n",
3442 diag = set_rx_queue_stats_mapping_registers(pi, port);
3444 if (diag == -ENOTSUP) {
3445 port->rx_queue_stats_mapping_enabled = 0;
3446 printf("RX queue stats mapping not supported for port id=%d\n", pi);
3449 rte_exit(EXIT_FAILURE,
3450 "set_rx_queue_stats_mapping_registers "
3451 "failed for port id=%d diag=%d\n",
3457 rxtx_port_config(struct rte_port *port)
3462 for (qid = 0; qid < nb_rxq; qid++) {
3463 offloads = port->rx_conf[qid].offloads;
3464 port->rx_conf[qid] = port->dev_info.default_rxconf;
3466 port->rx_conf[qid].offloads = offloads;
3468 /* Check if any Rx parameters have been passed */
3469 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3470 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3472 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3473 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3475 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3476 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3478 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3479 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3481 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3482 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3484 port->nb_rx_desc[qid] = nb_rxd;
3487 for (qid = 0; qid < nb_txq; qid++) {
3488 offloads = port->tx_conf[qid].offloads;
3489 port->tx_conf[qid] = port->dev_info.default_txconf;
3491 port->tx_conf[qid].offloads = offloads;
3493 /* Check if any Tx parameters have been passed */
3494 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3495 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3497 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3498 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3500 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3501 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3503 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3504 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3506 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3507 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3509 port->nb_tx_desc[qid] = nb_txd;
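/*
 * The RTE_PMD_PARAM_UNSET guards above mean a knob only overrides the
 * PMD default taken from dev_info.default_rxconf/default_txconf when
 * it was given on the command line; e.g. `--txfreet=32 --txrst=32`
 * sets tx_free_thresh and tx_rs_thresh on every Tx queue while the
 * remaining thresholds keep their PMD defaults.
 */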
3514 init_port_config(void)
3517 struct rte_port *port;
3520 RTE_ETH_FOREACH_DEV(pid) {
3522 port->dev_conf.fdir_conf = fdir_conf;
3524 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3529 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3530 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3531 rss_hf & port->dev_info.flow_type_rss_offloads;
3533 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3534 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3537 if (port->dcb_flag == 0) {
3538 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3539 port->dev_conf.rxmode.mq_mode =
3540 (enum rte_eth_rx_mq_mode)
3541 (rx_mq_mode & ETH_MQ_RX_RSS);
3543 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3546 rxtx_port_config(port);
3548 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3552 map_port_queue_stats_mapping_registers(pid, port);
3553 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3554 rte_pmd_ixgbe_bypass_init(pid);
3557 if (lsc_interrupt &&
3558 (rte_eth_devices[pid].data->dev_flags &
3559 RTE_ETH_DEV_INTR_LSC))
3560 port->dev_conf.intr_conf.lsc = 1;
3561 if (rmv_interrupt &&
3562 (rte_eth_devices[pid].data->dev_flags &
3563 RTE_ETH_DEV_INTR_RMV))
3564 port->dev_conf.intr_conf.rmv = 1;
3568 void set_port_slave_flag(portid_t slave_pid)
3570 struct rte_port *port;
3572 port = &ports[slave_pid];
3573 port->slave_flag = 1;
3576 void clear_port_slave_flag(portid_t slave_pid)
3578 struct rte_port *port;
3580 port = &ports[slave_pid];
3581 port->slave_flag = 0;
3584 uint8_t port_is_bonding_slave(portid_t slave_pid)
3586 struct rte_port *port;
3588 port = &ports[slave_pid];
3589 if ((rte_eth_devices[slave_pid].data->dev_flags &
3590 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3595 const uint16_t vlan_tags[] = {
3596 0, 1, 2, 3, 4, 5, 6, 7,
3597 8, 9, 10, 11, 12, 13, 14, 15,
3598 16, 17, 18, 19, 20, 21, 22, 23,
3599 24, 25, 26, 27, 28, 29, 30, 31
3603 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3604 enum dcb_mode_enable dcb_mode,
3605 enum rte_eth_nb_tcs num_tcs,
3610 struct rte_eth_rss_conf rss_conf;
3613 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3614 * given above, and the number of traffic classes available for use.
3616 if (dcb_mode == DCB_VT_ENABLED) {
3617 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3618 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3619 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3620 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3622 /* VMDQ+DCB RX and TX configurations */
3623 vmdq_rx_conf->enable_default_pool = 0;
3624 vmdq_rx_conf->default_pool = 0;
3625 vmdq_rx_conf->nb_queue_pools =
3626 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3627 vmdq_tx_conf->nb_queue_pools =
3628 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3630 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3631 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3632 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3633 vmdq_rx_conf->pool_map[i].pools =
3634 1 << (i % vmdq_rx_conf->nb_queue_pools);
3636 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3637 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3638 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
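/*
 * Worked example: num_tcs == ETH_4_TCS selects 32 pools, so each of
 * the 32 VLAN tags steers to its own pool (pools = 1 << i), and the
 * 8 user priorities map onto TCs 0-3 repeating (i % num_tcs):
 * priorities 0 and 4 -> TC0, 1 and 5 -> TC1, and so on.
 */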
3641 /* set DCB mode of RX and TX of multiple queues */
3642 eth_conf->rxmode.mq_mode =
3643 (enum rte_eth_rx_mq_mode)
3644 (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3645 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3647 struct rte_eth_dcb_rx_conf *rx_conf =
3648 &eth_conf->rx_adv_conf.dcb_rx_conf;
3649 struct rte_eth_dcb_tx_conf *tx_conf =
3650 &eth_conf->tx_adv_conf.dcb_tx_conf;
3652 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3654 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3658 rx_conf->nb_tcs = num_tcs;
3659 tx_conf->nb_tcs = num_tcs;
3661 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3662 rx_conf->dcb_tc[i] = i % num_tcs;
3663 tx_conf->dcb_tc[i] = i % num_tcs;
3666 eth_conf->rxmode.mq_mode =
3667 (enum rte_eth_rx_mq_mode)
3668 (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3669 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3670 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3674 eth_conf->dcb_capability_en =
3675 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3677 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3683 init_port_dcb_config(portid_t pid,
3684 enum dcb_mode_enable dcb_mode,
3685 enum rte_eth_nb_tcs num_tcs,
3688 struct rte_eth_conf port_conf;
3689 struct rte_port *rte_port;
3693 rte_port = &ports[pid];
3695 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3696 /* Enter DCB configuration status */
3699 port_conf.rxmode = rte_port->dev_conf.rxmode;
3700 port_conf.txmode = rte_port->dev_conf.txmode;
3702 /* Set configuration of DCB in VT mode and DCB in non-VT mode. */
3703 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3706 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3708 /* Re-configure the device (in DCB mode nb_rxq == nb_txq). */
3709 retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
3713 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3717 /* If dev_info.vmdq_pool_base is greater than 0,
3718 * the queue IDs of the VMDq pools start after the PF queues.
3720 if (dcb_mode == DCB_VT_ENABLED &&
3721 rte_port->dev_info.vmdq_pool_base > 0) {
3722 printf("VMDQ_DCB multi-queue mode is nonsensical"
3723 " for port %d.\n", pid);
3727 /* Assume the ports in testpmd have the same DCB capability
3728 * and the same number of Rx and Tx queues in DCB mode
3730 if (dcb_mode == DCB_VT_ENABLED) {
3731 if (rte_port->dev_info.max_vfs > 0) {
3732 nb_rxq = rte_port->dev_info.nb_rx_queues;
3733 nb_txq = rte_port->dev_info.nb_tx_queues;
3735 nb_rxq = rte_port->dev_info.max_rx_queues;
3736 nb_txq = rte_port->dev_info.max_tx_queues;
3739 /* If VT is disabled, use all PF queues. */
3740 if (rte_port->dev_info.vmdq_pool_base == 0) {
3741 nb_rxq = rte_port->dev_info.max_rx_queues;
3742 nb_txq = rte_port->dev_info.max_tx_queues;
3744 nb_rxq = (queueid_t)num_tcs;
3745 nb_txq = (queueid_t)num_tcs;
3749 rx_free_thresh = 64;
3751 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3753 rxtx_port_config(rte_port);
3755 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3756 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3757 rx_vft_set(pid, vlan_tags[i], 1);
3759 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3763 map_port_queue_stats_mapping_registers(pid, rte_port);
3765 rte_port->dcb_flag = 1;
3775 /* Configuration of Ethernet ports. */
3776 ports = rte_zmalloc("testpmd: ports",
3777 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3778 RTE_CACHE_LINE_SIZE);
3779 if (ports == NULL) {
3780 rte_exit(EXIT_FAILURE,
3781 "rte_zmalloc(%d struct rte_port) failed\n",
3784 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3785 LIST_INIT(&ports[i].flow_tunnel_list);
3786 /* Initialize ports NUMA structures */
3787 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3788 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3789 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3803 const char clr[] = { 27, '[', '2', 'J', '\0' };
3804 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3806 /* Clear screen and move to top left */
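/* i.e. the ANSI/VT100 escape sequences "\033[2J" (clear screen) and "\033[1;1H" (cursor home) */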
3807 printf("%s%s", clr, top_left);
3809 printf("\nPort statistics ====================================");
3810 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3811 nic_stats_display(fwd_ports_ids[i]);
3817 signal_handler(int signum)
3819 if (signum == SIGINT || signum == SIGTERM) {
3820 printf("\nSignal %d received, preparing to exit...\n",
3822 #ifdef RTE_LIB_PDUMP
3823 /* uninitialize packet capture framework */
3826 #ifdef RTE_LIB_LATENCYSTATS
3827 if (latencystats_enabled != 0)
3828 rte_latencystats_uninit();
3831 /* Set flag to indicate forced termination. */
3833 /* exit with the expected status */
3834 signal(signum, SIG_DFL);
3835 kill(getpid(), signum);
3840 main(int argc, char** argv)
3847 signal(SIGINT, signal_handler);
3848 signal(SIGTERM, signal_handler);
3850 testpmd_logtype = rte_log_register("testpmd");
3851 if (testpmd_logtype < 0)
3852 rte_exit(EXIT_FAILURE, "Cannot register log type");
3853 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3855 diag = rte_eal_init(argc, argv);
3857 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3858 rte_strerror(rte_errno));
3860 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3861 rte_exit(EXIT_FAILURE,
3862 "Secondary process type not supported.\n");
3864 ret = register_eth_event_callback();
3866 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3868 #ifdef RTE_LIB_PDUMP
3869 /* initialize packet capture framework */
3874 RTE_ETH_FOREACH_DEV(port_id) {
3875 ports_ids[count] = port_id;
3878 nb_ports = (portid_t) count;
3880 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3882 /* allocate port structures, and init them */
3885 set_def_fwd_config();
3887 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3888 "Check the core mask argument\n");
3890 /* Bitrate/latency stats disabled by default */
3891 #ifdef RTE_LIB_BITRATESTATS
3892 bitrate_enabled = 0;
3894 #ifdef RTE_LIB_LATENCYSTATS
3895 latencystats_enabled = 0;
3898 /* on FreeBSD, mlockall() is disabled by default */
3899 #ifdef RTE_EXEC_ENV_FREEBSD
3908 launch_args_parse(argc, argv);
3910 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3911 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3915 if (tx_first && interactive)
3916 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3917 "interactive mode.\n");
3919 if (tx_first && lsc_interrupt) {
3920 printf("Warning: lsc_interrupt needs to be off when "
3921 " using tx_first. Disabling.\n");
3925 if (!nb_rxq && !nb_txq)
3926 printf("Warning: Either rx or tx queues should be non-zero\n");
3928 if (nb_rxq > 1 && nb_rxq > nb_txq)
3929 printf("Warning: nb_rxq=%d enables RSS configuration, "
3930 "but nb_txq=%d will prevent to fully test it.\n",
3936 ret = rte_dev_hotplug_handle_enable();
3939 "fail to enable hotplug handling.");
3943 ret = rte_dev_event_monitor_start();
3946 "fail to start device event monitoring.");
3950 ret = rte_dev_event_callback_register(NULL,
3951 dev_event_callback, NULL);
3954 "fail to register device event callback\n");
3959 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3960 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3962 /* set all ports to promiscuous mode by default */
3963 RTE_ETH_FOREACH_DEV(port_id) {
3964 ret = rte_eth_promiscuous_enable(port_id);
3966 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3967 port_id, rte_strerror(-ret));
3970 /* Init metrics library */
3971 rte_metrics_init(rte_socket_id());
3973 #ifdef RTE_LIB_LATENCYSTATS
3974 if (latencystats_enabled != 0) {
3975 int ret = rte_latencystats_init(1, NULL);
3977 printf("Warning: latencystats init()"
3978 " returned error %d\n", ret);
3979 printf("Latencystats running on lcore %d\n",
3980 latencystats_lcore_id);
3984 /* Setup bitrate stats */
3985 #ifdef RTE_LIB_BITRATESTATS
3986 if (bitrate_enabled != 0) {
3987 bitrate_data = rte_stats_bitrate_create();
3988 if (bitrate_data == NULL)
3989 rte_exit(EXIT_FAILURE,
3990 "Could not allocate bitrate data.\n");
3991 rte_stats_bitrate_reg(bitrate_data);
3995 #ifdef RTE_LIB_CMDLINE
3996 if (strlen(cmdline_filename) != 0)
3997 cmdline_read_from_file(cmdline_filename);
3999 if (interactive == 1) {
4001 printf("Start automatic packet forwarding\n");
4002 start_packet_forwarding(0);
4014 printf("No commandline core given, start packet forwarding\n");
4015 start_packet_forwarding(tx_first);
4016 if (stats_period != 0) {
4017 uint64_t prev_time = 0, cur_time, diff_time = 0;
4018 uint64_t timer_period;
4020 /* Convert to number of cycles */
4021 timer_period = stats_period * rte_get_timer_hz();
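/*
 * e.g. stats_period = 5 on a machine with a 2.5 GHz timer gives
 * timer_period = 5 * 2.5e9 = 12.5e9 cycles between stats refreshes.
 */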
4023 while (f_quit == 0) {
4024 cur_time = rte_get_timer_cycles();
4025 diff_time += cur_time - prev_time;
4027 if (diff_time >= timer_period) {
4029 /* Reset the timer */
4032 /* Sleep to avoid unnecessary checks */
4033 prev_time = cur_time;
4038 printf("Press enter to exit\n");
4039 rc = read(0, &c, 1);
4045 ret = rte_eal_cleanup();
4047 rte_exit(EXIT_FAILURE,
4048 "EAL cleanup failed: %s\n", strerror(-ret));
4050 return EXIT_SUCCESS;