1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
67 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
68 #define HUGE_FLAG (0x40000)
70 #define HUGE_FLAG MAP_HUGETLB
73 #ifndef MAP_HUGE_SHIFT
74 /* older kernels (or FreeBSD) will not have this define */
75 #define HUGE_SHIFT (26)
77 #define HUGE_SHIFT MAP_HUGE_SHIFT
80 #define EXTMEM_HEAP_NAME "extmem"
81 #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* use master core for command line ? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, the NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports among the CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
102  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
117  * Store the specified sockets on which the memory pools used by the ports are allocated.
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123  * Store the specified sockets on which the RX rings used by the ports are allocated.
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129  * Store the specified sockets on which the TX rings used by the ports are allocated.
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet address of peer target ports to which packets are
137 * Must be instantiated with the ethernet addresses of peer traffic generator
140 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
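/*
 * Worked example (illustrative, with assumed numbers): with 2 probed ports,
 * nb_rxq = 4 and nb_txq = 2, init_fwd_streams() allocates
 * 2 * RTE_MAX(4, 2) = 8 forwarding streams.
 */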
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
182 #if defined RTE_LIBRTE_PMD_SOFTNIC
185 #ifdef RTE_LIBRTE_IEEE1588
186 &ieee1588_fwd_engine,
191 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
192 uint16_t mempool_flags;
194 struct fwd_config cur_fwd_config;
195 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
196 uint32_t retry_enabled;
197 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
198 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
200 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
201 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
202 * specified on command-line. */
203 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
206  * In a container, it may not be possible to terminate the process running with the
207  * 'stats-period' option. Set a flag to exit the stats-period loop once SIGINT/SIGTERM is received.
212 * Configuration of packet segments used by the "txonly" processing engine.
214 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
215 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
216 TXONLY_DEF_PACKET_LEN,
218 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
220 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
221 /**< Split policy for packets to TX. */
223 uint8_t txonly_multi_flow;
224 /**< Whether multiple flows are generated in TXONLY mode. */
226 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
227 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
229 /* Whether the current configuration is in DCB mode; 0 means it is not */
230 uint8_t dcb_config = 0;
232 /* Whether DCB is in testing status */
233 uint8_t dcb_test = 0;
236 * Configurable number of RX/TX queues.
238 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
239 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
240 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
243 * Configurable number of RX/TX ring descriptors.
244 * Defaults are supplied by drivers via ethdev.
246 #define RTE_TEST_RX_DESC_DEFAULT 0
247 #define RTE_TEST_TX_DESC_DEFAULT 0
248 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
249 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
251 #define RTE_PMD_PARAM_UNSET -1
253 * Configurable values of RX and TX ring threshold registers.
256 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
257 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
258 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
260 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
261 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
262 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
265 * Configurable value of RX free threshold.
267 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
270 * Configurable value of RX drop enable.
272 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
275 * Configurable value of TX free threshold.
277 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
280 * Configurable value of TX RS bit threshold.
282 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
285  * Configurable number of packets buffered before sending.
287 uint16_t noisy_tx_sw_bufsz;
290 * Configurable value of packet buffer timeout.
292 uint16_t noisy_tx_sw_buf_flush_time;
295 * Configurable value for size of VNF internal memory area
296 * used for simulating noisy neighbour behaviour
298 uint64_t noisy_lkup_mem_sz;
301 * Configurable value of number of random writes done in
302 * VNF simulation memory area.
304 uint64_t noisy_lkup_num_writes;
307 * Configurable value of number of random reads done in
308 * VNF simulation memory area.
310 uint64_t noisy_lkup_num_reads;
313 * Configurable value of number of random reads/writes done in
314 * VNF simulation memory area.
316 uint64_t noisy_lkup_num_reads_writes;
319 * Receive Side Scaling (RSS) configuration.
321 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
324 * Port topology configuration
326 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
329  * Avoid flushing all the RX streams before forwarding starts.
331 uint8_t no_flush_rx = 0; /* flush by default */
334 * Flow API isolated mode.
336 uint8_t flow_isolate_all;
339  * Avoid checking the link status when starting/stopping a port.
341 uint8_t no_link_check = 0; /* check by default */
344 * Don't automatically start all ports in interactive mode.
346 uint8_t no_device_start = 0;
349 * Enable link status change notification
351 uint8_t lsc_interrupt = 1; /* enabled by default */
354 * Enable device removal notification.
356 uint8_t rmv_interrupt = 1; /* enabled by default */
358 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
360 /* After attach, port setup is called on event or by iterator */
361 bool setup_on_probe_event = true;
363 /* Clear ptypes on port initialization. */
364 uint8_t clear_ptypes = true;
366 /* Pretty printing of ethdev events */
367 static const char * const eth_event_desc[] = {
368 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
369 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
370 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
371 [RTE_ETH_EVENT_INTR_RESET] = "reset",
372 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
373 [RTE_ETH_EVENT_IPSEC] = "IPsec",
374 [RTE_ETH_EVENT_MACSEC] = "MACsec",
375 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
376 [RTE_ETH_EVENT_NEW] = "device probed",
377 [RTE_ETH_EVENT_DESTROY] = "device released",
378 [RTE_ETH_EVENT_MAX] = NULL,
382 * Display or mask ether events
383 * Default to all events except VF_MBOX
385 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
386 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
387 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
388 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
389 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
390 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
391 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
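/*
 * Illustrative helper (not part of the original testpmd code): a minimal
 * sketch of how a mask like event_print_mask is typically tested before an
 * ethdev event is printed. The helper name is hypothetical.
 */
static inline int
eth_event_is_printed(enum rte_eth_event_type type)
{
	return (event_print_mask & (UINT32_C(1) << type)) != 0;
}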
393  * Decide whether all memory is locked for performance.
398 * NIC bypass mode configuration options.
401 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
402 /* The NIC bypass watchdog timeout. */
403 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
407 #ifdef RTE_LIBRTE_LATENCY_STATS
410  * Set when latency stats are enabled on the command line
412 uint8_t latencystats_enabled;
415  * Lcore ID to service latency statistics.
417 lcoreid_t latencystats_lcore_id = -1;
422 * Ethernet device configuration.
424 struct rte_eth_rxmode rx_mode = {
425 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
426 /**< Default maximum frame length. */
429 struct rte_eth_txmode tx_mode = {
430 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
433 struct rte_fdir_conf fdir_conf = {
434 .mode = RTE_FDIR_MODE_NONE,
435 .pballoc = RTE_FDIR_PBALLOC_64K,
436 .status = RTE_FDIR_REPORT_STATUS,
438 .vlan_tci_mask = 0xFFEF,
440 .src_ip = 0xFFFFFFFF,
441 .dst_ip = 0xFFFFFFFF,
444 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
445 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
447 .src_port_mask = 0xFFFF,
448 .dst_port_mask = 0xFFFF,
449 .mac_addr_byte_mask = 0xFF,
450 .tunnel_type_mask = 1,
451 .tunnel_id_mask = 0xFFFFFFFF,
456 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
458 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
459 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
461 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
462 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
464 uint16_t nb_tx_queue_stats_mappings = 0;
465 uint16_t nb_rx_queue_stats_mappings = 0;
468 * Display zero values by default for xstats
470 uint8_t xstats_hide_zero;
472 unsigned int num_sockets = 0;
473 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
475 #ifdef RTE_LIBRTE_BITRATE
476 /* Bitrate statistics */
477 struct rte_stats_bitrates *bitrate_data;
478 lcoreid_t bitrate_lcore_id;
479 uint8_t bitrate_enabled;
482 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
483 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
485 /* Forward function declarations */
486 static void setup_attached_port(portid_t pi);
487 static void map_port_queue_stats_mapping_registers(portid_t pi,
488 struct rte_port *port);
489 static void check_all_ports_link_status(uint32_t port_mask);
490 static int eth_event_callback(portid_t port_id,
491 enum rte_eth_event_type type,
492 void *param, void *ret_param);
493 static void dev_event_callback(const char *device_name,
494 enum rte_dev_event_type type,
498 * Check if all the ports are started.
499 * If yes, return positive value. If not, return zero.
501 static int all_ports_started(void);
503 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
504 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
506 /* Holds the registered mbuf dynamic flags names. */
507 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
510 * Helper function to check if socket is already discovered.
511 * If yes, return positive value. If not, return zero.
514 new_socket_id(unsigned int socket_id)
518 for (i = 0; i < num_sockets; i++) {
519 if (socket_ids[i] == socket_id)
526 * Setup default configuration.
529 set_default_fwd_lcores_config(void)
533 unsigned int sock_num;
536 for (i = 0; i < RTE_MAX_LCORE; i++) {
537 if (!rte_lcore_is_enabled(i))
539 sock_num = rte_lcore_to_socket_id(i);
540 if (new_socket_id(sock_num)) {
541 if (num_sockets >= RTE_MAX_NUMA_NODES) {
542 rte_exit(EXIT_FAILURE,
543 "Total sockets greater than %u\n",
546 socket_ids[num_sockets++] = sock_num;
548 if (i == rte_get_master_lcore())
550 fwd_lcores_cpuids[nb_lc++] = i;
552 nb_lcores = (lcoreid_t) nb_lc;
553 nb_cfg_lcores = nb_lcores;
558 set_def_peer_eth_addrs(void)
562 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
563 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
564 peer_eth_addrs[i].addr_bytes[5] = i;
569 set_default_fwd_ports_config(void)
574 RTE_ETH_FOREACH_DEV(pt_id) {
575 fwd_ports_ids[i++] = pt_id;
577 /* Update sockets info according to the attached device */
578 int socket_id = rte_eth_dev_socket_id(pt_id);
579 if (socket_id >= 0 && new_socket_id(socket_id)) {
580 if (num_sockets >= RTE_MAX_NUMA_NODES) {
581 rte_exit(EXIT_FAILURE,
582 "Total sockets greater than %u\n",
585 socket_ids[num_sockets++] = socket_id;
589 nb_cfg_ports = nb_ports;
590 nb_fwd_ports = nb_ports;
594 set_def_fwd_config(void)
596 set_default_fwd_lcores_config();
597 set_def_peer_eth_addrs();
598 set_default_fwd_ports_config();
601 /* extremely pessimistic estimation of memory required to create a mempool */
603 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
605 unsigned int n_pages, mbuf_per_pg, leftover;
606 uint64_t total_mem, mbuf_mem, obj_sz;
608 /* there is no good way to predict how much space the mempool will
609 * occupy because it will allocate chunks on the fly, and some of those
610 * will come from default DPDK memory while some will come from our
611 * external memory, so just assume 128MB will be enough for everyone.
613 uint64_t hdr_mem = 128 << 20;
615 /* account for possible non-contiguousness */
616 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
618 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
622 mbuf_per_pg = pgsz / obj_sz;
623 leftover = (nb_mbufs % mbuf_per_pg) > 0;
624 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
626 mbuf_mem = n_pages * pgsz;
628 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
630 if (total_mem > SIZE_MAX) {
631 TESTPMD_LOG(ERR, "Memory size too big\n");
634 *out = (size_t)total_mem;
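/*
 * Worked example (illustrative, with assumed numbers): with 2 MB pages and a
 * per-mbuf object size of roughly 2.5 KB as reported by
 * rte_mempool_calc_obj_size(), about 800 mbufs fit in one page, so 256K
 * mbufs need roughly 320 pages (~640 MB) of mbuf memory, on top of the fixed
 * 128 MB header allowance, rounded up to a page boundary.
 */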
640 pagesz_flags(uint64_t page_sz)
642 /* as per the mmap() manpage, the huge page size is encoded as
643  * log2(page size) shifted left by MAP_HUGE_SHIFT
645 int log2 = rte_log2_u64(page_sz);
647 return (log2 << HUGE_SHIFT);
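/*
 * Worked example: for a 2 MB page, rte_log2_u64() returns 21, so this
 * evaluates to 21 << 26, which matches the kernel's MAP_HUGE_2MB encoding;
 * a 1 GB page gives 30 << 26 (MAP_HUGE_1GB).
 */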
651 alloc_mem(size_t memsz, size_t pgsz, bool huge)
656 /* allocate anonymous hugepages */
657 flags = MAP_ANONYMOUS | MAP_PRIVATE;
659 flags |= HUGE_FLAG | pagesz_flags(pgsz);
661 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
662 if (addr == MAP_FAILED)
668 struct extmem_param {
672 rte_iova_t *iova_table;
673 unsigned int iova_table_len;
677 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
680 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
681 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
682 unsigned int cur_page, n_pages, pgsz_idx;
683 size_t mem_sz, cur_pgsz;
684 rte_iova_t *iovas = NULL;
688 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
689 /* skip anything that is too big */
690 if (pgsizes[pgsz_idx] > SIZE_MAX)
693 cur_pgsz = pgsizes[pgsz_idx];
695 /* if we were told not to allocate hugepages, override */
697 cur_pgsz = sysconf(_SC_PAGESIZE);
699 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
701 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
705 /* allocate our memory */
706 addr = alloc_mem(mem_sz, cur_pgsz, huge);
708 /* if we couldn't allocate memory with a specified page size,
709 * that doesn't mean we can't do it with other page sizes, so
715 /* store IOVA addresses for every page in this memory area */
716 n_pages = mem_sz / cur_pgsz;
718 iovas = malloc(sizeof(*iovas) * n_pages);
721 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
724 /* lock memory if it's not huge pages */
728 /* populate IOVA addresses */
729 for (cur_page = 0; cur_page < n_pages; cur_page++) {
734 offset = cur_pgsz * cur_page;
735 cur = RTE_PTR_ADD(addr, offset);
737 /* touch the page before getting its IOVA */
738 *(volatile char *)cur = 0;
740 iova = rte_mem_virt2iova(cur);
742 iovas[cur_page] = iova;
747 /* if we couldn't allocate anything */
753 param->pgsz = cur_pgsz;
754 param->iova_table = iovas;
755 param->iova_table_len = n_pages;
762 munmap(addr, mem_sz);
768 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
770 struct extmem_param param;
773 memset(&param, 0, sizeof(param));
775 /* check if our heap exists */
776 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
778 /* create our heap */
779 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
781 TESTPMD_LOG(ERR, "Cannot create heap\n");
786 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
788 TESTPMD_LOG(ERR, "Cannot create memory area\n");
792 /* we now have a valid memory area, so add it to heap */
793 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
794 param.addr, param.len, param.iova_table,
795 param.iova_table_len, param.pgsz);
797 /* when using VFIO, memory is automatically mapped for DMA by EAL */
799 /* not needed any more */
800 free(param.iova_table);
803 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
804 munmap(param.addr, param.len);
810 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
816 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
817 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
822 RTE_ETH_FOREACH_DEV(pid) {
823 struct rte_eth_dev *dev =
824 &rte_eth_devices[pid];
826 ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
830 "unable to DMA unmap addr 0x%p "
832 memhdr->addr, dev->data->name);
835 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
838 "unable to un-register addr 0x%p\n", memhdr->addr);
843 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
844 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
847 size_t page_size = sysconf(_SC_PAGESIZE);
850 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
854 "unable to register addr 0x%p\n", memhdr->addr);
857 RTE_ETH_FOREACH_DEV(pid) {
858 struct rte_eth_dev *dev =
859 &rte_eth_devices[pid];
861 ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
865 "unable to DMA map addr 0x%p "
867 memhdr->addr, dev->data->name);
873 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
874 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
876 struct rte_pktmbuf_extmem *xmem;
877 unsigned int ext_num, zone_num, elt_num;
880 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
881 elt_num = EXTBUF_ZONE_SIZE / elt_size;
882 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
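/*
 * Sizing sketch (illustrative, assuming the default 2176-byte mbuf segment
 * size): elt_size stays 2176 after cache-line alignment, so one 2 MB zone
 * holds EXTBUF_ZONE_SIZE / 2176 = 963 elements, and zone_num is the number
 * of such zones needed to hold nb_mbufs, rounded up.
 */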
884 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
886 TESTPMD_LOG(ERR, "Cannot allocate memory for "
887 "external buffer descriptors\n");
891 for (ext_num = 0; ext_num < zone_num; ext_num++) {
892 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
893 const struct rte_memzone *mz;
894 char mz_name[RTE_MEMZONE_NAMESIZE];
897 ret = snprintf(mz_name, sizeof(mz_name),
898 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
899 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
900 errno = ENAMETOOLONG;
904 mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
906 RTE_MEMZONE_IOVA_CONTIG |
908 RTE_MEMZONE_SIZE_HINT_ONLY,
912 * The caller exits on external buffer creation
913 * error, so there is no need to free memzones.
919 xseg->buf_ptr = mz->addr;
920 xseg->buf_iova = mz->iova;
921 xseg->buf_len = EXTBUF_ZONE_SIZE;
922 xseg->elt_size = elt_size;
924 if (ext_num == 0 && xmem != NULL) {
933 * Configuration initialisation done once at init time.
935 static struct rte_mempool *
936 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
937 unsigned int socket_id)
939 char pool_name[RTE_MEMPOOL_NAMESIZE];
940 struct rte_mempool *rte_mp = NULL;
943 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
944 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
947 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
948 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
950 switch (mp_alloc_type) {
951 case MP_ALLOC_NATIVE:
953 /* wrapper to rte_mempool_create() */
954 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
955 rte_mbuf_best_mempool_ops());
956 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
957 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
962 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
963 mb_size, (unsigned int) mb_mempool_cache,
964 sizeof(struct rte_pktmbuf_pool_private),
965 socket_id, mempool_flags);
969 if (rte_mempool_populate_anon(rte_mp) == 0) {
970 rte_mempool_free(rte_mp);
974 rte_pktmbuf_pool_init(rte_mp, NULL);
975 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
976 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
980 case MP_ALLOC_XMEM_HUGE:
983 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
985 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
986 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
989 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
991 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
993 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
994 rte_mbuf_best_mempool_ops());
995 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
996 mb_mempool_cache, 0, mbuf_seg_size,
1002 struct rte_pktmbuf_extmem *ext_mem;
1003 unsigned int ext_num;
1005 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1006 socket_id, pool_name, &ext_mem);
1008 rte_exit(EXIT_FAILURE,
1009 "Can't create pinned data buffers\n");
1011 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1012 rte_mbuf_best_mempool_ops());
1013 rte_mp = rte_pktmbuf_pool_create_extbuf
1014 (pool_name, nb_mbuf, mb_mempool_cache,
1015 0, mbuf_seg_size, socket_id,
1022 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1027 if (rte_mp == NULL) {
1028 rte_exit(EXIT_FAILURE,
1029 "Creation of mbuf pool for socket %u failed: %s\n",
1030 socket_id, rte_strerror(rte_errno));
1031 } else if (verbose_level > 0) {
1032 rte_mempool_dump(stdout, rte_mp);
1038  * Check whether the given socket id is valid in NUMA mode.
1039  * Return 0 if valid, otherwise return -1.
1042 check_socket_id(const unsigned int socket_id)
1044 static int warning_once = 0;
1046 if (new_socket_id(socket_id)) {
1047 if (!warning_once && numa_support)
1048 printf("Warning: NUMA should be configured manually by"
1049 " using --port-numa-config and"
1050 " --ring-numa-config parameters along with"
1059 * Get the allowed maximum number of RX queues.
1060  * *pid returns the port id which has the minimal value of
1061  * max_rx_queues among all ports.
1064 get_allowed_max_nb_rxq(portid_t *pid)
1066 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1067 bool max_rxq_valid = false;
1069 struct rte_eth_dev_info dev_info;
1071 RTE_ETH_FOREACH_DEV(pi) {
1072 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1075 max_rxq_valid = true;
1076 if (dev_info.max_rx_queues < allowed_max_rxq) {
1077 allowed_max_rxq = dev_info.max_rx_queues;
1081 return max_rxq_valid ? allowed_max_rxq : 0;
1085  * Check whether the input rxq is valid.
1086  * The input rxq is valid if it is not greater than the maximum number
1087  * of RX queues of any port.
1088  * Return 0 if valid, otherwise return -1.
1091 check_nb_rxq(queueid_t rxq)
1093 queueid_t allowed_max_rxq;
1096 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1097 if (rxq > allowed_max_rxq) {
1098 printf("Fail: input rxq (%u) can't be greater "
1099 "than max_rx_queues (%u) of port %u\n",
1109 * Get the allowed maximum number of TX queues.
1110  * *pid returns the port id which has the minimal value of
1111  * max_tx_queues among all ports.
1114 get_allowed_max_nb_txq(portid_t *pid)
1116 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1117 bool max_txq_valid = false;
1119 struct rte_eth_dev_info dev_info;
1121 RTE_ETH_FOREACH_DEV(pi) {
1122 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1125 max_txq_valid = true;
1126 if (dev_info.max_tx_queues < allowed_max_txq) {
1127 allowed_max_txq = dev_info.max_tx_queues;
1131 return max_txq_valid ? allowed_max_txq : 0;
1135  * Check whether the input txq is valid.
1136  * The input txq is valid if it is not greater than the maximum number
1137  * of TX queues of any port.
1138  * Return 0 if valid, otherwise return -1.
1141 check_nb_txq(queueid_t txq)
1143 queueid_t allowed_max_txq;
1146 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1147 if (txq > allowed_max_txq) {
1148 printf("Fail: input txq (%u) can't be greater "
1149 "than max_tx_queues (%u) of port %u\n",
1159 * Get the allowed maximum number of RXDs of every rx queue.
1160  * *pid returns the port id which has the minimal value of
1161  * max_rxd among all queues of all ports.
1164 get_allowed_max_nb_rxd(portid_t *pid)
1166 uint16_t allowed_max_rxd = UINT16_MAX;
1168 struct rte_eth_dev_info dev_info;
1170 RTE_ETH_FOREACH_DEV(pi) {
1171 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1174 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1175 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1179 return allowed_max_rxd;
1183 * Get the allowed minimal number of RXDs of every rx queue.
1184  * *pid returns the port id which has the largest value of
1185  * min_rxd among all queues of all ports.
1188 get_allowed_min_nb_rxd(portid_t *pid)
1190 uint16_t allowed_min_rxd = 0;
1192 struct rte_eth_dev_info dev_info;
1194 RTE_ETH_FOREACH_DEV(pi) {
1195 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1198 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1199 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1204 return allowed_min_rxd;
1208  * Check whether the input rxd is valid.
1209  * The input rxd is valid if it is not greater than the maximum number
1210  * of RXDs of every Rx queue and not less than the
1211  * minimal number of RXDs of every Rx queue.
1212  * Return 0 if valid, otherwise return -1.
1215 check_nb_rxd(queueid_t rxd)
1217 uint16_t allowed_max_rxd;
1218 uint16_t allowed_min_rxd;
1221 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1222 if (rxd > allowed_max_rxd) {
1223 printf("Fail: input rxd (%u) can't be greater "
1224 "than max_rxds (%u) of port %u\n",
1231 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1232 if (rxd < allowed_min_rxd) {
1233 printf("Fail: input rxd (%u) can't be less "
1234 "than min_rxds (%u) of port %u\n",
1245  * Get the allowed maximum number of TXDs of every tx queue.
1246  * *pid returns the port id which has the minimal value of
1247  * max_txd among all queues of all ports.
1250 get_allowed_max_nb_txd(portid_t *pid)
1252 uint16_t allowed_max_txd = UINT16_MAX;
1254 struct rte_eth_dev_info dev_info;
1256 RTE_ETH_FOREACH_DEV(pi) {
1257 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1260 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1261 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1265 return allowed_max_txd;
1269  * Get the allowed minimal number of TXDs of every tx queue.
1270  * *pid returns the port id which has the largest value of
1271  * min_txd among all queues of all ports.
1274 get_allowed_min_nb_txd(portid_t *pid)
1276 uint16_t allowed_min_txd = 0;
1278 struct rte_eth_dev_info dev_info;
1280 RTE_ETH_FOREACH_DEV(pi) {
1281 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1284 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1285 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1290 return allowed_min_txd;
1294  * Check whether the input txd is valid.
1295  * The input txd is valid if it is not greater than the maximum number
1296  * of TXDs of every Tx queue and not less than the minimal number.
1297  * Return 0 if valid, otherwise return -1.
1300 check_nb_txd(queueid_t txd)
1302 uint16_t allowed_max_txd;
1303 uint16_t allowed_min_txd;
1306 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1307 if (txd > allowed_max_txd) {
1308 printf("Fail: input txd (%u) can't be greater "
1309 "than max_txds (%u) of port %u\n",
1316 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1317 if (txd < allowed_min_txd) {
1318 printf("Fail: input txd (%u) can't be less "
1319 "than min_txds (%u) of port %u\n",
1330 * Get the allowed maximum number of hairpin queues.
1331  * *pid returns the port id which has the minimal value of
1332  * max_hairpin_queues among all ports.
1335 get_allowed_max_nb_hairpinq(portid_t *pid)
1337 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1339 struct rte_eth_hairpin_cap cap;
1341 RTE_ETH_FOREACH_DEV(pi) {
1342 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1346 if (cap.max_nb_queues < allowed_max_hairpinq) {
1347 allowed_max_hairpinq = cap.max_nb_queues;
1351 return allowed_max_hairpinq;
1355  * Check whether the input hairpin queue number is valid.
1356  * It is valid if it is not greater than the maximum number
1357  * of hairpin queues of any port.
1358  * Return 0 if valid, otherwise return -1.
1361 check_nb_hairpinq(queueid_t hairpinq)
1363 queueid_t allowed_max_hairpinq;
1366 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1367 if (hairpinq > allowed_max_hairpinq) {
1368 printf("Fail: input hairpin (%u) can't be greater "
1369 "than max_hairpin_queues (%u) of port %u\n",
1370 hairpinq, allowed_max_hairpinq, pid);
1380 struct rte_port *port;
1381 struct rte_mempool *mbp;
1382 unsigned int nb_mbuf_per_pool;
1384 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1385 struct rte_gro_param gro_param;
1392 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1394 /* Configuration of logical cores. */
1395 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1396 sizeof(struct fwd_lcore *) * nb_lcores,
1397 RTE_CACHE_LINE_SIZE);
1398 if (fwd_lcores == NULL) {
1399 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1400 "failed\n", nb_lcores);
1402 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1403 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1404 sizeof(struct fwd_lcore),
1405 RTE_CACHE_LINE_SIZE);
1406 if (fwd_lcores[lc_id] == NULL) {
1407 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1410 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1413 RTE_ETH_FOREACH_DEV(pid) {
1415 /* Apply default TxRx configuration for all ports */
1416 port->dev_conf.txmode = tx_mode;
1417 port->dev_conf.rxmode = rx_mode;
1419 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1421 rte_exit(EXIT_FAILURE,
1422 "rte_eth_dev_info_get() failed\n");
1424 if (!(port->dev_info.tx_offload_capa &
1425 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1426 port->dev_conf.txmode.offloads &=
1427 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1429 if (port_numa[pid] != NUMA_NO_CONFIG)
1430 port_per_socket[port_numa[pid]]++;
1432 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1435 * if socket_id is invalid,
1436 * set to the first available socket.
1438 if (check_socket_id(socket_id) < 0)
1439 socket_id = socket_ids[0];
1440 port_per_socket[socket_id]++;
1444 /* Apply Rx offloads configuration */
1445 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1446 port->rx_conf[k].offloads =
1447 port->dev_conf.rxmode.offloads;
1448 /* Apply Tx offloads configuration */
1449 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1450 port->tx_conf[k].offloads =
1451 port->dev_conf.txmode.offloads;
1453 /* set flag to initialize port/queue */
1454 port->need_reconfig = 1;
1455 port->need_reconfig_queues = 1;
1456 port->tx_metadata = 0;
1458 /* Check for maximum number of segments per MTU. Accordingly
1459 * update the mbuf data size.
1461 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1462 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1463 data_size = rx_mode.max_rx_pkt_len /
1464 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1466 if ((data_size + RTE_PKTMBUF_HEADROOM) >
1468 mbuf_data_size = data_size +
1469 RTE_PKTMBUF_HEADROOM;
1476 TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
1480  * Create pools of mbufs.
1481  * If NUMA support is disabled, create a single pool of mbufs in
1482  * socket 0 memory by default.
1483  * Otherwise, create a pool of mbufs in the memory of each detected socket.
1485  * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1486  * nb_txd can be configured at run time.
1488 if (param_total_num_mbufs)
1489 nb_mbuf_per_pool = param_total_num_mbufs;
1491 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1492 (nb_lcores * mb_mempool_cache) +
1493 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1494 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
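/*
 * Rough example of the sizing above (illustrative, assuming default limits
 * of 2048 RX and 2048 TX descriptors, a 512-packet burst and a 250-entry
 * mempool cache): with 4 forwarding lcores this gives
 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, multiplied by RTE_MAX_ETHPORTS
 * to cover the worst case of every possible port being used.
 */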
1500 for (i = 0; i < num_sockets; i++)
1501 mempools[i] = mbuf_pool_create(mbuf_data_size,
1505 if (socket_num == UMA_NO_CONFIG)
1506 mempools[0] = mbuf_pool_create(mbuf_data_size,
1507 nb_mbuf_per_pool, 0);
1509 mempools[socket_num] = mbuf_pool_create
1517 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1518 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1520  * Record which mbuf pool each logical core should use, if needed.
1522 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1523 mbp = mbuf_pool_find(
1524 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1527 mbp = mbuf_pool_find(0);
1528 fwd_lcores[lc_id]->mbp = mbp;
1529 /* initialize GSO context */
1530 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1531 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1532 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1533 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1535 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1538 /* Configuration of packet forwarding streams. */
1539 if (init_fwd_streams() < 0)
1540 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1544 /* create a gro context for each lcore */
1545 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1546 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1547 gro_param.max_item_per_flow = MAX_PKT_BURST;
1548 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1549 gro_param.socket_id = rte_lcore_to_socket_id(
1550 fwd_lcores_cpuids[lc_id]);
1551 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1552 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1553 rte_exit(EXIT_FAILURE,
1554 "rte_gro_ctx_create() failed\n");
1558 #if defined RTE_LIBRTE_PMD_SOFTNIC
1559 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1560 RTE_ETH_FOREACH_DEV(pid) {
1562 const char *driver = port->dev_info.driver_name;
1564 if (strcmp(driver, "net_softnic") == 0)
1565 port->softport.fwd_lcore_arg = fwd_lcores;
1574 reconfig(portid_t new_port_id, unsigned socket_id)
1576 struct rte_port *port;
1579 /* Reconfiguration of Ethernet ports. */
1580 port = &ports[new_port_id];
1582 ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
1586 /* set flag to initialize port/queue */
1587 port->need_reconfig = 1;
1588 port->need_reconfig_queues = 1;
1589 port->socket_id = socket_id;
1596 init_fwd_streams(void)
1599 struct rte_port *port;
1600 streamid_t sm_id, nb_fwd_streams_new;
1603 /* set the socket id according to whether NUMA is enabled */
1604 RTE_ETH_FOREACH_DEV(pid) {
1606 if (nb_rxq > port->dev_info.max_rx_queues) {
1607 printf("Fail: nb_rxq(%d) is greater than "
1608 "max_rx_queues(%d)\n", nb_rxq,
1609 port->dev_info.max_rx_queues);
1612 if (nb_txq > port->dev_info.max_tx_queues) {
1613 printf("Fail: nb_txq(%d) is greater than "
1614 "max_tx_queues(%d)\n", nb_txq,
1615 port->dev_info.max_tx_queues);
1619 if (port_numa[pid] != NUMA_NO_CONFIG)
1620 port->socket_id = port_numa[pid];
1622 port->socket_id = rte_eth_dev_socket_id(pid);
1625 * if socket_id is invalid,
1626 * set to the first available socket.
1628 if (check_socket_id(port->socket_id) < 0)
1629 port->socket_id = socket_ids[0];
1633 if (socket_num == UMA_NO_CONFIG)
1634 port->socket_id = 0;
1636 port->socket_id = socket_num;
1640 q = RTE_MAX(nb_rxq, nb_txq);
1642 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1645 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1646 if (nb_fwd_streams_new == nb_fwd_streams)
1649 if (fwd_streams != NULL) {
1650 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1651 if (fwd_streams[sm_id] == NULL)
1653 rte_free(fwd_streams[sm_id]);
1654 fwd_streams[sm_id] = NULL;
1656 rte_free(fwd_streams);
1661 nb_fwd_streams = nb_fwd_streams_new;
1662 if (nb_fwd_streams) {
1663 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1664 sizeof(struct fwd_stream *) * nb_fwd_streams,
1665 RTE_CACHE_LINE_SIZE);
1666 if (fwd_streams == NULL)
1667 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1668 " (struct fwd_stream *)) failed\n",
1671 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1672 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1673 " struct fwd_stream", sizeof(struct fwd_stream),
1674 RTE_CACHE_LINE_SIZE);
1675 if (fwd_streams[sm_id] == NULL)
1676 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1677 "(struct fwd_stream) failed\n");
1684 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1686 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1688 unsigned int total_burst;
1689 unsigned int nb_burst;
1690 unsigned int burst_stats[3];
1691 uint16_t pktnb_stats[3];
1693 int burst_percent[3];
1696 * First compute the total number of packet bursts and the
1697 * two highest numbers of bursts of the same number of packets.
1700 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1701 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1702 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1703 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1706 total_burst += nb_burst;
1707 if (nb_burst > burst_stats[0]) {
1708 burst_stats[1] = burst_stats[0];
1709 pktnb_stats[1] = pktnb_stats[0];
1710 burst_stats[0] = nb_burst;
1711 pktnb_stats[0] = nb_pkt;
1712 } else if (nb_burst > burst_stats[1]) {
1713 burst_stats[1] = nb_burst;
1714 pktnb_stats[1] = nb_pkt;
1717 if (total_burst == 0)
1719 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1720 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1721 burst_percent[0], (int) pktnb_stats[0]);
1722 if (burst_stats[0] == total_burst) {
1726 if (burst_stats[0] + burst_stats[1] == total_burst) {
1727 printf(" + %d%% of %d pkts]\n",
1728 100 - burst_percent[0], pktnb_stats[1]);
1731 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1732 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1733 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1734 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1737 printf(" + %d%% of %d pkts + %d%% of others]\n",
1738 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1740 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1743 fwd_stream_stats_display(streamid_t stream_id)
1745 struct fwd_stream *fs;
1746 static const char *fwd_top_stats_border = "-------";
1748 fs = fwd_streams[stream_id];
1749 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1750 (fs->fwd_dropped == 0))
1752 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1753 "TX Port=%2d/Queue=%2d %s\n",
1754 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1755 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1756 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1757 " TX-dropped: %-14"PRIu64,
1758 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1760 /* if checksum mode */
1761 if (cur_fwd_eng == &csum_fwd_engine) {
1762 printf(" RX- bad IP checksum: %-14"PRIu64
1763 " Rx- bad L4 checksum: %-14"PRIu64
1764 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1765 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1766 fs->rx_bad_outer_l4_csum);
1771 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1772 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1773 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1778 fwd_stats_display(void)
1780 static const char *fwd_stats_border = "----------------------";
1781 static const char *acc_stats_border = "+++++++++++++++";
1783 struct fwd_stream *rx_stream;
1784 struct fwd_stream *tx_stream;
1785 uint64_t tx_dropped;
1786 uint64_t rx_bad_ip_csum;
1787 uint64_t rx_bad_l4_csum;
1788 uint64_t rx_bad_outer_l4_csum;
1789 } ports_stats[RTE_MAX_ETHPORTS];
1790 uint64_t total_rx_dropped = 0;
1791 uint64_t total_tx_dropped = 0;
1792 uint64_t total_rx_nombuf = 0;
1793 struct rte_eth_stats stats;
1794 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1795 uint64_t fwd_cycles = 0;
1797 uint64_t total_recv = 0;
1798 uint64_t total_xmit = 0;
1799 struct rte_port *port;
1804 memset(ports_stats, 0, sizeof(ports_stats));
1806 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1807 struct fwd_stream *fs = fwd_streams[sm_id];
1809 if (cur_fwd_config.nb_fwd_streams >
1810 cur_fwd_config.nb_fwd_ports) {
1811 fwd_stream_stats_display(sm_id);
1813 ports_stats[fs->tx_port].tx_stream = fs;
1814 ports_stats[fs->rx_port].rx_stream = fs;
1817 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1819 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1820 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1821 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1822 fs->rx_bad_outer_l4_csum;
1824 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1825 fwd_cycles += fs->core_cycles;
1828 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1831 pt_id = fwd_ports_ids[i];
1832 port = &ports[pt_id];
1834 rte_eth_stats_get(pt_id, &stats);
1835 stats.ipackets -= port->stats.ipackets;
1836 stats.opackets -= port->stats.opackets;
1837 stats.ibytes -= port->stats.ibytes;
1838 stats.obytes -= port->stats.obytes;
1839 stats.imissed -= port->stats.imissed;
1840 stats.oerrors -= port->stats.oerrors;
1841 stats.rx_nombuf -= port->stats.rx_nombuf;
1843 total_recv += stats.ipackets;
1844 total_xmit += stats.opackets;
1845 total_rx_dropped += stats.imissed;
1846 total_tx_dropped += ports_stats[pt_id].tx_dropped;
1847 total_tx_dropped += stats.oerrors;
1848 total_rx_nombuf += stats.rx_nombuf;
1850 printf("\n %s Forward statistics for port %-2d %s\n",
1851 fwd_stats_border, pt_id, fwd_stats_border);
1853 if (!port->rx_queue_stats_mapping_enabled &&
1854 !port->tx_queue_stats_mapping_enabled) {
1855 printf(" RX-packets: %-14"PRIu64
1856 " RX-dropped: %-14"PRIu64
1857 "RX-total: %-"PRIu64"\n",
1858 stats.ipackets, stats.imissed,
1859 stats.ipackets + stats.imissed);
1861 if (cur_fwd_eng == &csum_fwd_engine)
1862 printf(" Bad-ipcsum: %-14"PRIu64
1863 " Bad-l4csum: %-14"PRIu64
1864 "Bad-outer-l4csum: %-14"PRIu64"\n",
1865 ports_stats[pt_id].rx_bad_ip_csum,
1866 ports_stats[pt_id].rx_bad_l4_csum,
1867 ports_stats[pt_id].rx_bad_outer_l4_csum);
1868 if (stats.ierrors + stats.rx_nombuf > 0) {
1869 printf(" RX-error: %-"PRIu64"\n",
1871 printf(" RX-nombufs: %-14"PRIu64"\n",
1875 printf(" TX-packets: %-14"PRIu64
1876 " TX-dropped: %-14"PRIu64
1877 "TX-total: %-"PRIu64"\n",
1878 stats.opackets, ports_stats[pt_id].tx_dropped,
1879 stats.opackets + ports_stats[pt_id].tx_dropped);
1881 printf(" RX-packets: %14"PRIu64
1882 " RX-dropped:%14"PRIu64
1883 " RX-total:%14"PRIu64"\n",
1884 stats.ipackets, stats.imissed,
1885 stats.ipackets + stats.imissed);
1887 if (cur_fwd_eng == &csum_fwd_engine)
1888 printf(" Bad-ipcsum:%14"PRIu64
1889 " Bad-l4csum:%14"PRIu64
1890 " Bad-outer-l4csum: %-14"PRIu64"\n",
1891 ports_stats[pt_id].rx_bad_ip_csum,
1892 ports_stats[pt_id].rx_bad_l4_csum,
1893 ports_stats[pt_id].rx_bad_outer_l4_csum);
1894 if ((stats.ierrors + stats.rx_nombuf) > 0) {
1895 printf(" RX-error:%"PRIu64"\n", stats.ierrors);
1896 printf(" RX-nombufs: %14"PRIu64"\n",
1900 printf(" TX-packets: %14"PRIu64
1901 " TX-dropped:%14"PRIu64
1902 " TX-total:%14"PRIu64"\n",
1903 stats.opackets, ports_stats[pt_id].tx_dropped,
1904 stats.opackets + ports_stats[pt_id].tx_dropped);
1907 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1908 if (ports_stats[pt_id].rx_stream)
1909 pkt_burst_stats_display("RX",
1910 &ports_stats[pt_id].rx_stream->rx_burst_stats);
1911 if (ports_stats[pt_id].tx_stream)
1912 pkt_burst_stats_display("TX",
1913 &ports_stats[pt_id].tx_stream->tx_burst_stats);
1916 if (port->rx_queue_stats_mapping_enabled) {
1918 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1919 printf(" Stats reg %2d RX-packets:%14"PRIu64
1920 " RX-errors:%14"PRIu64
1921 " RX-bytes:%14"PRIu64"\n",
1922 j, stats.q_ipackets[j],
1923 stats.q_errors[j], stats.q_ibytes[j]);
1927 if (port->tx_queue_stats_mapping_enabled) {
1928 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1929 printf(" Stats reg %2d TX-packets:%14"PRIu64
1932 j, stats.q_opackets[j],
1937 printf(" %s--------------------------------%s\n",
1938 fwd_stats_border, fwd_stats_border);
1941 printf("\n %s Accumulated forward statistics for all ports"
1943 acc_stats_border, acc_stats_border);
1944 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1946 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1948 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1949 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1950 if (total_rx_nombuf > 0)
1951 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1952 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1954 acc_stats_border, acc_stats_border);
1955 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1957 printf("\n CPU cycles/packet=%u (total cycles="
1958 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1959 (unsigned int)(fwd_cycles / total_recv),
1960 fwd_cycles, total_recv);
1965 fwd_stats_reset(void)
1971 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1972 pt_id = fwd_ports_ids[i];
1973 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
1975 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1976 struct fwd_stream *fs = fwd_streams[sm_id];
1980 fs->fwd_dropped = 0;
1981 fs->rx_bad_ip_csum = 0;
1982 fs->rx_bad_l4_csum = 0;
1983 fs->rx_bad_outer_l4_csum = 0;
1985 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1986 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
1987 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
1989 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1990 fs->core_cycles = 0;
1996 flush_fwd_rx_queues(void)
1998 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2005 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2006 uint64_t timer_period;
2008 /* convert to number of cycles */
2009 timer_period = rte_get_timer_hz(); /* 1 second timeout */
2011 for (j = 0; j < 2; j++) {
2012 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2013 for (rxq = 0; rxq < nb_rxq; rxq++) {
2014 port_id = fwd_ports_ids[rxp];
2016  * testpmd can get stuck in the do-while loop below
2017  * if rte_eth_rx_burst() always returns nonzero
2018  * packets, so a timer is added to exit this loop
2019  * after the 1-second timeout expires.
2021 prev_tsc = rte_rdtsc();
2023 nb_rx = rte_eth_rx_burst(port_id, rxq,
2024 pkts_burst, MAX_PKT_BURST);
2025 for (i = 0; i < nb_rx; i++)
2026 rte_pktmbuf_free(pkts_burst[i]);
2028 cur_tsc = rte_rdtsc();
2029 diff_tsc = cur_tsc - prev_tsc;
2030 timer_tsc += diff_tsc;
2031 } while ((nb_rx > 0) &&
2032 (timer_tsc < timer_period));
2036 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
2041 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2043 struct fwd_stream **fsm;
2046 #ifdef RTE_LIBRTE_BITRATE
2047 uint64_t tics_per_1sec;
2048 uint64_t tics_datum;
2049 uint64_t tics_current;
2050 uint16_t i, cnt_ports;
2052 cnt_ports = nb_ports;
2053 tics_datum = rte_rdtsc();
2054 tics_per_1sec = rte_get_timer_hz();
2056 fsm = &fwd_streams[fc->stream_idx];
2057 nb_fs = fc->stream_nb;
2059 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2060 (*pkt_fwd)(fsm[sm_id]);
2061 #ifdef RTE_LIBRTE_BITRATE
2062 if (bitrate_enabled != 0 &&
2063 bitrate_lcore_id == rte_lcore_id()) {
2064 tics_current = rte_rdtsc();
2065 if (tics_current - tics_datum >= tics_per_1sec) {
2066 /* Periodic bitrate calculation */
2067 for (i = 0; i < cnt_ports; i++)
2068 rte_stats_bitrate_calc(bitrate_data,
2070 tics_datum = tics_current;
2074 #ifdef RTE_LIBRTE_LATENCY_STATS
2075 if (latencystats_enabled != 0 &&
2076 latencystats_lcore_id == rte_lcore_id())
2077 rte_latencystats_update();
2080 } while (! fc->stopped);
2084 start_pkt_forward_on_core(void *fwd_arg)
2086 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2087 cur_fwd_config.fwd_eng->packet_fwd);
2092 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2093 * Used to start communication flows in network loopback test configurations.
2096 run_one_txonly_burst_on_core(void *fwd_arg)
2098 struct fwd_lcore *fwd_lc;
2099 struct fwd_lcore tmp_lcore;
2101 fwd_lc = (struct fwd_lcore *) fwd_arg;
2102 tmp_lcore = *fwd_lc;
2103 tmp_lcore.stopped = 1;
2104 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2109 * Launch packet forwarding:
2110 * - Setup per-port forwarding context.
2111  * - Launch logical cores with their forwarding configuration.
2114 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2116 port_fwd_begin_t port_fwd_begin;
2121 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2122 if (port_fwd_begin != NULL) {
2123 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2124 (*port_fwd_begin)(fwd_ports_ids[i]);
2126 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2127 lc_id = fwd_lcores_cpuids[i];
2128 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2129 fwd_lcores[i]->stopped = 0;
2130 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2131 fwd_lcores[i], lc_id);
2133 printf("launch lcore %u failed - diag=%d\n",
2140 * Launch packet forwarding configuration.
2143 start_packet_forwarding(int with_tx_first)
2145 port_fwd_begin_t port_fwd_begin;
2146 port_fwd_end_t port_fwd_end;
2147 struct rte_port *port;
2151 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2152 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
2154 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2155 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
2157 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2158 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2159 (!nb_rxq || !nb_txq))
2160 rte_exit(EXIT_FAILURE,
2161 "Either rxq or txq is 0, cannot use %s fwd mode\n",
2162 cur_fwd_eng->fwd_mode_name);
2164 if (all_ports_started() == 0) {
2165 printf("Not all ports were started\n");
2168 if (test_done == 0) {
2169 printf("Packet forwarding already started\n");
2175 for (i = 0; i < nb_fwd_ports; i++) {
2176 pt_id = fwd_ports_ids[i];
2177 port = &ports[pt_id];
2178 if (!port->dcb_flag) {
2179 printf("In DCB mode, all forwarding ports must "
2180 "be configured in this mode.\n");
2184 if (nb_fwd_lcores == 1) {
2185 printf("In DCB mode, the number of forwarding cores "
2186 "should be larger than 1.\n");
2195 flush_fwd_rx_queues();
2197 pkt_fwd_config_display(&cur_fwd_config);
2198 rxtx_config_display();
2201 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2202 pt_id = fwd_ports_ids[i];
2203 port = &ports[pt_id];
2204 map_port_queue_stats_mapping_registers(pt_id, port);
2206 if (with_tx_first) {
2207 port_fwd_begin = tx_only_engine.port_fwd_begin;
2208 if (port_fwd_begin != NULL) {
2209 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2210 (*port_fwd_begin)(fwd_ports_ids[i]);
2212 while (with_tx_first--) {
2213 launch_packet_forwarding(
2214 run_one_txonly_burst_on_core);
2215 rte_eal_mp_wait_lcore();
2217 port_fwd_end = tx_only_engine.port_fwd_end;
2218 if (port_fwd_end != NULL) {
2219 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2220 (*port_fwd_end)(fwd_ports_ids[i]);
2223 launch_packet_forwarding(start_pkt_forward_on_core);
2227 stop_packet_forwarding(void)
2229 port_fwd_end_t port_fwd_end;
2235 printf("Packet forwarding not started\n");
2238 printf("Telling cores to stop...");
2239 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2240 fwd_lcores[lc_id]->stopped = 1;
2241 printf("\nWaiting for lcores to finish...\n");
2242 rte_eal_mp_wait_lcore();
2243 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2244 if (port_fwd_end != NULL) {
2245 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2246 pt_id = fwd_ports_ids[i];
2247 (*port_fwd_end)(pt_id);
2251 fwd_stats_display();
2253 printf("\nDone.\n");
2258 dev_set_link_up(portid_t pid)
2260 if (rte_eth_dev_set_link_up(pid) < 0)
2261 printf("\nFailed to set link up.\n");
2265 dev_set_link_down(portid_t pid)
2267 if (rte_eth_dev_set_link_down(pid) < 0)
2268 printf("\nFailed to set link down.\n");
2272 all_ports_started(void)
2275 struct rte_port *port;
2277 RTE_ETH_FOREACH_DEV(pi) {
2279 /* Check if there is a port which is not started */
2280 if ((port->port_status != RTE_PORT_STARTED) &&
2281 (port->slave_flag == 0))
2285 /* All ports are started */
2290 port_is_stopped(portid_t port_id)
2292 struct rte_port *port = &ports[port_id];
2294 if ((port->port_status != RTE_PORT_STOPPED) &&
2295 (port->slave_flag == 0))
2301 all_ports_stopped(void)
2305 RTE_ETH_FOREACH_DEV(pi) {
2306 if (!port_is_stopped(pi))
2314 port_is_started(portid_t port_id)
2316 if (port_id_is_invalid(port_id, ENABLED_WARN))
2319 if (ports[port_id].port_status != RTE_PORT_STARTED)
2325 /* Configure the Rx and Tx hairpin queues for the selected port. */
2327 setup_hairpin_queues(portid_t pi)
2330 struct rte_eth_hairpin_conf hairpin_conf = {
2335 struct rte_port *port = &ports[pi];
2337 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2338 hairpin_conf.peers[0].port = pi;
2339 hairpin_conf.peers[0].queue = i + nb_rxq;
2340 diag = rte_eth_tx_hairpin_queue_setup
2341 (pi, qi, nb_txd, &hairpin_conf);
2346 /* Failed to set up TX hairpin queue, return */
2347 if (rte_atomic16_cmpset(&(port->port_status),
2349 RTE_PORT_STOPPED) == 0)
2350 printf("Port %d can not be set back "
2351 "to stopped\n", pi);
2352 printf("Fail to configure port %d hairpin "
2354 /* try to reconfigure queues next time */
2355 port->need_reconfig_queues = 1;
2358 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2359 hairpin_conf.peers[0].port = pi;
2360 hairpin_conf.peers[0].queue = i + nb_txq;
2361 diag = rte_eth_rx_hairpin_queue_setup
2362 (pi, qi, nb_rxd, &hairpin_conf);
2367 /* Failed to set up RX hairpin queue, return */
2368 if (rte_atomic16_cmpset(&(port->port_status),
2370 RTE_PORT_STOPPED) == 0)
2371 printf("Port %d can not be set back "
2372 "to stopped\n", pi);
2373 printf("Fail to configure port %d hairpin "
2375 /* try to reconfigure queues next time */
2376 port->need_reconfig_queues = 1;
2383 start_port(portid_t pid)
2385 int diag, need_check_link_status = -1;
2388 struct rte_port *port;
2389 struct rte_ether_addr mac_addr;
2390 struct rte_eth_hairpin_cap cap;
2392 if (port_id_is_invalid(pid, ENABLED_WARN))
2397 RTE_ETH_FOREACH_DEV(pi) {
2398 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2401 need_check_link_status = 0;
2403 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2404 RTE_PORT_HANDLING) == 0) {
2405 printf("Port %d is now not stopped\n", pi);
2409 if (port->need_reconfig > 0) {
2410 port->need_reconfig = 0;
2412 if (flow_isolate_all) {
2413 int ret = port_flow_isolate(pi, 1);
2415 printf("Failed to apply isolated"
2416 " mode on port %d\n", pi);
2420 configure_rxtx_dump_callbacks(0);
2421 printf("Configuring Port %d (socket %u)\n", pi,
2423 if (nb_hairpinq > 0 &&
2424 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2425 printf("Port %d doesn't support hairpin "
2429 /* configure port */
2430 diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
2431 nb_txq + nb_hairpinq,
2434 if (rte_atomic16_cmpset(&(port->port_status),
2435 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2436 printf("Port %d can not be set back "
2437 "to stopped\n", pi);
2438 printf("Fail to configure port %d\n", pi);
2439 /* try to reconfigure port next time */
2440 port->need_reconfig = 1;
2444 if (port->need_reconfig_queues > 0) {
2445 port->need_reconfig_queues = 0;
2446 /* setup tx queues */
2447 for (qi = 0; qi < nb_txq; qi++) {
2448 if ((numa_support) &&
2449 (txring_numa[pi] != NUMA_NO_CONFIG))
2450 diag = rte_eth_tx_queue_setup(pi, qi,
2451 port->nb_tx_desc[qi],
2453 &(port->tx_conf[qi]));
2455 diag = rte_eth_tx_queue_setup(pi, qi,
2456 port->nb_tx_desc[qi],
2458 &(port->tx_conf[qi]));
2463 /* Failed to set up a TX queue; revert port status and return */
2464 if (rte_atomic16_cmpset(&(port->port_status),
2466 RTE_PORT_STOPPED) == 0)
2467 printf("Port %d can not be set back "
2468 "to stopped\n", pi);
2469 printf("Fail to configure port %d tx queues\n",
2471 /* try to reconfigure queues next time */
2472 port->need_reconfig_queues = 1;
2475 for (qi = 0; qi < nb_rxq; qi++) {
2476 /* setup rx queues */
2477 if ((numa_support) &&
2478 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2479 struct rte_mempool * mp =
2480 mbuf_pool_find(rxring_numa[pi]);
2482 printf("Failed to setup RX queue:"
2483 "No mempool allocation"
2484 " on the socket %d\n",
2489 diag = rte_eth_rx_queue_setup(pi, qi,
2490 port->nb_rx_desc[qi],
2492 &(port->rx_conf[qi]),
2495 struct rte_mempool *mp =
2496 mbuf_pool_find(port->socket_id);
2498 printf("Failed to setup RX queue:"
2499 "No mempool allocation"
2500 " on the socket %d\n",
2504 diag = rte_eth_rx_queue_setup(pi, qi,
2505 port->nb_rx_desc[qi],
2507 &(port->rx_conf[qi]),
2513 /* Failed to set up an RX queue; revert port status and return */
2514 if (rte_atomic16_cmpset(&(port->port_status),
2516 RTE_PORT_STOPPED) == 0)
2517 printf("Port %d can not be set back "
2518 "to stopped\n", pi);
2519 printf("Fail to configure port %d rx queues\n",
2521 /* try to reconfigure queues next time */
2522 port->need_reconfig_queues = 1;
2525 /* setup hairpin queues */
2526 if (setup_hairpin_queues(pi) != 0)
2529 configure_rxtx_dump_callbacks(verbose_level);
2531 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2535 "Port %d: Failed to disable Ptype parsing\n",
2540 if (rte_eth_dev_start(pi) < 0) {
2541 printf("Fail to start port %d\n", pi);
2543 /* Failed to start the port; revert port status and return */
2544 if (rte_atomic16_cmpset(&(port->port_status),
2545 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2546 printf("Port %d can not be set back to "
2551 if (rte_atomic16_cmpset(&(port->port_status),
2552 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2553 printf("Port %d can not be set into started\n", pi);
2555 if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2556 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2557 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2558 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2559 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2561 /* at least one port started, need checking link status */
2562 need_check_link_status = 1;
2565 if (need_check_link_status == 1 && !no_link_check)
2566 check_all_ports_link_status(RTE_PORT_ALL);
2567 else if (need_check_link_status == 0)
2568 printf("Please stop the ports first\n");
2575 stop_port(portid_t pid)
2578 struct rte_port *port;
2579 int need_check_link_status = 0;
2586 if (port_id_is_invalid(pid, ENABLED_WARN))
2589 printf("Stopping ports...\n");
2591 RTE_ETH_FOREACH_DEV(pi) {
2592 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2595 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2596 printf("Please remove port %d from forwarding configuration.\n", pi);
2600 if (port_is_bonding_slave(pi)) {
2601 printf("Please remove port %d from bonded device.\n", pi);
2606 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2607 RTE_PORT_HANDLING) == 0)
2610 rte_eth_dev_stop(pi);
2612 if (rte_atomic16_cmpset(&(port->port_status),
2613 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2614 printf("Port %d can not be set into stopped\n", pi);
2615 need_check_link_status = 1;
2617 if (need_check_link_status && !no_link_check)
2618 check_all_ports_link_status(RTE_PORT_ALL);
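/*
 * remove_invalid_ports_in() compacts a port-id array in place, dropping
 * entries whose port id is no longer valid (e.g. after a detach) while
 * keeping the relative order of the remaining entries, and updates the
 * caller's count. remove_invalid_ports() applies it to both ports_ids and
 * fwd_ports_ids and keeps nb_cfg_ports in sync with nb_fwd_ports.
 */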
2624 remove_invalid_ports_in(portid_t *array, portid_t *total)
2627 portid_t new_total = 0;
2629 for (i = 0; i < *total; i++)
2630 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2631 array[new_total] = array[i];
2638 remove_invalid_ports(void)
2640 remove_invalid_ports_in(ports_ids, &nb_ports);
2641 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2642 nb_cfg_ports = nb_fwd_ports;
2646 close_port(portid_t pid)
2649 struct rte_port *port;
2651 if (port_id_is_invalid(pid, ENABLED_WARN))
2654 printf("Closing ports...\n");
2656 RTE_ETH_FOREACH_DEV(pi) {
2657 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2660 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2661 printf("Please remove port %d from forwarding configuration.\n", pi);
2665 if (port_is_bonding_slave(pi)) {
2666 printf("Please remove port %d from bonded device.\n", pi);
2671 if (rte_atomic16_cmpset(&(port->port_status),
2672 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2673 printf("Port %d is already closed\n", pi);
2677 if (rte_atomic16_cmpset(&(port->port_status),
2678 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2679 printf("Port %d is now not stopped\n", pi);
2683 if (port->flow_list)
2684 port_flow_flush(pi);
2685 rte_eth_dev_close(pi);
2687 remove_invalid_ports();
2689 if (rte_atomic16_cmpset(&(port->port_status),
2690 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2691 printf("Port %d cannot be set to closed\n", pi);
2698 reset_port(portid_t pid)
2702 struct rte_port *port;
2704 if (port_id_is_invalid(pid, ENABLED_WARN))
2707 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2708 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2709 printf("Can not reset port(s), please stop port(s) first.\n");
2713 printf("Resetting ports...\n");
2715 RTE_ETH_FOREACH_DEV(pi) {
2716 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2719 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2720 printf("Please remove port %d from forwarding "
2721 "configuration.\n", pi);
2725 if (port_is_bonding_slave(pi)) {
2726 printf("Please remove port %d from bonded device.\n",
2731 diag = rte_eth_dev_reset(pi);
2734 port->need_reconfig = 1;
2735 port->need_reconfig_queues = 1;
2737 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2745 attach_port(char *identifier)
2748 struct rte_dev_iterator iterator;
2750 printf("Attaching a new port...\n");
2752 if (identifier == NULL) {
2753 printf("Invalid parameters are specified\n");
2757 if (rte_dev_probe(identifier) < 0) {
2758 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2762 /* first attach mode: event */
2763 if (setup_on_probe_event) {
2764 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2765 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2766 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2767 ports[pi].need_setup != 0)
2768 setup_attached_port(pi);
2772 /* second attach mode: iterator */
2773 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2774 /* setup ports matching the devargs used for probing */
2775 if (port_is_forwarding(pi))
2776 continue; /* port was already attached before */
2777 setup_attached_port(pi);
2782 setup_attached_port(portid_t pi)
2784 unsigned int socket_id;
2787 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2788 /* if socket_id is invalid, set to the first available socket. */
2789 if (check_socket_id(socket_id) < 0)
2790 socket_id = socket_ids[0];
2791 reconfig(pi, socket_id);
2792 ret = rte_eth_promiscuous_enable(pi);
2794 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
2795 pi, rte_strerror(-ret));
2797 ports_ids[nb_ports++] = pi;
2798 fwd_ports_ids[nb_fwd_ports++] = pi;
2799 nb_cfg_ports = nb_fwd_ports;
2800 ports[pi].need_setup = 0;
2801 ports[pi].port_status = RTE_PORT_STOPPED;
2803 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2808 detach_device(struct rte_device *dev)
2813 printf("Device already removed\n");
2817 printf("Removing a device...\n");
2819 if (rte_dev_remove(dev) < 0) {
2820 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2823 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2824 /* reset mapping between old ports and removed device */
2825 rte_eth_devices[sibling].device = NULL;
2826 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2827 /* sibling ports are forced to be closed */
2828 ports[sibling].port_status = RTE_PORT_CLOSED;
2829 printf("Port %u is closed\n", sibling);
2833 remove_invalid_ports();
2835 printf("Device is detached\n");
2836 printf("Now total ports is %d\n", nb_ports);
2842 detach_port_device(portid_t port_id)
2844 if (port_id_is_invalid(port_id, ENABLED_WARN))
2847 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2848 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2849 printf("Port not stopped\n");
2852 printf("Port was not closed\n");
2853 if (ports[port_id].flow_list)
2854 port_flow_flush(port_id);
2857 detach_device(rte_eth_devices[port_id].device);
2861 detach_devargs(char *identifier)
2863 struct rte_dev_iterator iterator;
2864 struct rte_devargs da;
2867 printf("Removing a device...\n");
2869 memset(&da, 0, sizeof(da));
2870 if (rte_devargs_parsef(&da, "%s", identifier)) {
2871 printf("cannot parse identifier\n");
2877 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
2878 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2879 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2880 printf("Port %u not stopped\n", port_id);
2881 rte_eth_iterator_cleanup(&iterator);
2885 /* sibling ports are forced to be closed */
2886 if (ports[port_id].flow_list)
2887 port_flow_flush(port_id);
2888 ports[port_id].port_status = RTE_PORT_CLOSED;
2889 printf("Port %u is now closed\n", port_id);
2893 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
2894 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
2895 da.name, da.bus->name);
2899 remove_invalid_ports();
2901 printf("Device %s is detached\n", identifier);
2902 printf("Now total ports is %d\n", nb_ports);
2914 stop_packet_forwarding();
2916 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2918 if (mp_alloc_type == MP_ALLOC_ANON)
2919 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
2923 if (ports != NULL) {
2925 RTE_ETH_FOREACH_DEV(pt_id) {
2926 printf("\nStopping port %d...\n", pt_id);
2930 RTE_ETH_FOREACH_DEV(pt_id) {
2931 printf("\nShutting down port %d...\n", pt_id);
2938 ret = rte_dev_event_monitor_stop();
2941 "fail to stop device event monitor.");
2945 ret = rte_dev_event_callback_unregister(NULL,
2946 dev_event_callback, NULL);
2949 "fail to unregister device event callback.\n");
2953 ret = rte_dev_hotplug_handle_disable();
2956 "fail to disable hotplug handling.\n");
2960 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2962 rte_mempool_free(mempools[i]);
2965 printf("\nBye...\n");
2968 typedef void (*cmd_func_t)(void);
2969 struct pmd_test_command {
2970 const char *cmd_name;
2971 cmd_func_t cmd_func;
2974 /* Check the link status of all ports for up to 9 s, then print the final status */
2976 check_all_ports_link_status(uint32_t port_mask)
2978 #define CHECK_INTERVAL 100 /* 100ms */
2979 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2981 uint8_t count, all_ports_up, print_flag = 0;
2982 struct rte_eth_link link;
2985 printf("Checking link statuses...\n");
2987 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2989 RTE_ETH_FOREACH_DEV(portid) {
2990 if ((port_mask & (1 << portid)) == 0)
2992 memset(&link, 0, sizeof(link));
2993 ret = rte_eth_link_get_nowait(portid, &link);
2996 if (print_flag == 1)
2997 printf("Port %u link get failed: %s\n",
2998 portid, rte_strerror(-ret));
3001 /* print link status if flag set */
3002 if (print_flag == 1) {
3003 if (link.link_status)
3005 "Port%d Link Up. speed %u Mbps- %s\n",
3006 portid, link.link_speed,
3007 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
3008 ("full-duplex") : ("half-duplex\n"));
3010 printf("Port %d Link Down\n", portid);
3013 /* clear all_ports_up flag if any link down */
3014 if (link.link_status == ETH_LINK_DOWN) {
3019 /* After the final status print, stop checking */
3020 if (print_flag == 1)
3023 if (all_ports_up == 0) {
3025 rte_delay_ms(CHECK_INTERVAL);
3028 /* set the print_flag if all ports up or timeout */
3029 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3039 * This callback removes a single port of a device; it does not handle
3040 * removing multiple ports of the same device.
3041 * TODO: the device detach invocation is planned to move from the user side
3042 * into the EAL, and all PMDs should free port resources on ethdev close.
3045 rmv_port_callback(void *arg)
3047 int need_to_start = 0;
3048 int org_no_link_check = no_link_check;
3049 portid_t port_id = (intptr_t)arg;
3050 struct rte_device *dev;
3052 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3054 if (!test_done && port_is_forwarding(port_id)) {
3056 stop_packet_forwarding();
3060 no_link_check = org_no_link_check;
3062 /* Save rte_device pointer before closing ethdev port */
3063 dev = rte_eth_devices[port_id].device;
3064 close_port(port_id);
3065 detach_device(dev); /* might be already removed or have more ports */
3068 start_packet_forwarding(0);
3071 /* This function is used by the interrupt thread */
3073 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3076 RTE_SET_USED(param);
3077 RTE_SET_USED(ret_param);
3079 if (type >= RTE_ETH_EVENT_MAX) {
3080 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3081 port_id, __func__, type);
3083 } else if (event_print_mask & (UINT32_C(1) << type)) {
3084 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3085 eth_event_desc[type]);
3090 case RTE_ETH_EVENT_NEW:
3091 ports[port_id].need_setup = 1;
3092 ports[port_id].port_status = RTE_PORT_HANDLING;
3094 case RTE_ETH_EVENT_INTR_RMV:
3095 if (port_id_is_invalid(port_id, DISABLED_WARN))
3097 if (rte_eal_alarm_set(100000,
3098 rmv_port_callback, (void *)(intptr_t)port_id))
3099 fprintf(stderr, "Could not set up deferred device removal\n");
3108 register_eth_event_callback(void)
3111 enum rte_eth_event_type event;
3113 for (event = RTE_ETH_EVENT_UNKNOWN;
3114 event < RTE_ETH_EVENT_MAX; event++) {
3115 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3120 TESTPMD_LOG(ERR, "Failed to register callback for "
3121 "%s event\n", eth_event_desc[event]);
3129 /* This function is used by the interrupt thread */
3131 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3132 __rte_unused void *arg)
3137 if (type >= RTE_DEV_EVENT_MAX) {
3138 fprintf(stderr, "%s called upon invalid event %d\n",
3144 case RTE_DEV_EVENT_REMOVE:
3145 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3147 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3149 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
3154 * Because the user's callback is invoked from the EAL interrupt
3155 * callback, the interrupt callback must finish before it can be
3156 * unregistered when detaching the device. Therefore, return from the
3157 * callback quickly and use a deferred removal to detach the device.
3158 * This is a workaround: once device detaching is moved into the EAL
3159 * in the future, the deferred removal could
3162 if (rte_eal_alarm_set(100000,
3163 rmv_port_callback, (void *)(intptr_t)port_id))
3165 "Could not set up deferred device removal\n");
3167 case RTE_DEV_EVENT_ADD:
3168 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3170 /* TODO: once kernel driver binding is finished,
3171 * begin to attach the port.
3180 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3184 uint8_t mapping_found = 0;
3186 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3187 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3188 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
3189 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
3190 tx_queue_stats_mappings[i].queue_id,
3191 tx_queue_stats_mappings[i].stats_counter_id);
3198 port->tx_queue_stats_mapping_enabled = 1;
3203 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3207 uint8_t mapping_found = 0;
3209 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3210 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3211 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
3212 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
3213 rx_queue_stats_mappings[i].queue_id,
3214 rx_queue_stats_mappings[i].stats_counter_id);
3221 port->rx_queue_stats_mapping_enabled = 1;
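/*
 * map_port_queue_stats_mapping_registers() applies the user-supplied
 * queue-to-counter mappings in both directions. -ENOTSUP from the PMD is
 * tolerated and only disables the per-queue mapping for that port; any
 * other error is fatal.
 */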
3226 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
3230 diag = set_tx_queue_stats_mapping_registers(pi, port);
3232 if (diag == -ENOTSUP) {
3233 port->tx_queue_stats_mapping_enabled = 0;
3234 printf("TX queue stats mapping not supported port id=%d\n", pi);
3237 rte_exit(EXIT_FAILURE,
3238 "set_tx_queue_stats_mapping_registers "
3239 "failed for port id=%d diag=%d\n",
3243 diag = set_rx_queue_stats_mapping_registers(pi, port);
3245 if (diag == -ENOTSUP) {
3246 port->rx_queue_stats_mapping_enabled = 0;
3247 printf("RX queue stats mapping not supported port id=%d\n", pi);
3250 rte_exit(EXIT_FAILURE,
3251 "set_rx_queue_stats_mapping_registers "
3252 "failed for port id=%d diag=%d\n",
3258 rxtx_port_config(struct rte_port *port)
3263 for (qid = 0; qid < nb_rxq; qid++) {
3264 offloads = port->rx_conf[qid].offloads;
3265 port->rx_conf[qid] = port->dev_info.default_rxconf;
3267 port->rx_conf[qid].offloads = offloads;
3269 /* Check if any Rx parameters have been passed */
3270 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3271 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3273 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3274 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3276 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3277 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3279 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3280 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3282 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3283 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3285 port->nb_rx_desc[qid] = nb_rxd;
3288 for (qid = 0; qid < nb_txq; qid++) {
3289 offloads = port->tx_conf[qid].offloads;
3290 port->tx_conf[qid] = port->dev_info.default_txconf;
3292 port->tx_conf[qid].offloads = offloads;
3294 /* Check if any Tx parameters have been passed */
3295 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3296 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3298 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3299 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3301 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3302 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3304 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3305 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3307 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3308 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3310 port->nb_tx_desc[qid] = nb_txd;
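/*
 * init_port_config() (below) seeds every probed port's configuration: flow
 * director settings, an RSS hash mask limited to what the device reports in
 * flow_type_rss_offloads, the Rx multi-queue mode (RSS or none when DCB is
 * not requested), default per-queue Rx/Tx settings via rxtx_port_config(),
 * and the LSC/RMV interrupt flags when the device advertises them.
 */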
3315 init_port_config(void)
3318 struct rte_port *port;
3321 RTE_ETH_FOREACH_DEV(pid) {
3323 port->dev_conf.fdir_conf = fdir_conf;
3325 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3330 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3331 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3332 rss_hf & port->dev_info.flow_type_rss_offloads;
3334 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3335 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3338 if (port->dcb_flag == 0) {
3339 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3340 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
3342 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3345 rxtx_port_config(port);
3347 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3351 map_port_queue_stats_mapping_registers(pid, port);
3352 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3353 rte_pmd_ixgbe_bypass_init(pid);
3356 if (lsc_interrupt &&
3357 (rte_eth_devices[pid].data->dev_flags &
3358 RTE_ETH_DEV_INTR_LSC))
3359 port->dev_conf.intr_conf.lsc = 1;
3360 if (rmv_interrupt &&
3361 (rte_eth_devices[pid].data->dev_flags &
3362 RTE_ETH_DEV_INTR_RMV))
3363 port->dev_conf.intr_conf.rmv = 1;
3367 void set_port_slave_flag(portid_t slave_pid)
3369 struct rte_port *port;
3371 port = &ports[slave_pid];
3372 port->slave_flag = 1;
3375 void clear_port_slave_flag(portid_t slave_pid)
3377 struct rte_port *port;
3379 port = &ports[slave_pid];
3380 port->slave_flag = 0;
3383 uint8_t port_is_bonding_slave(portid_t slave_pid)
3385 struct rte_port *port;
3387 port = &ports[slave_pid];
3388 if ((rte_eth_devices[slave_pid].data->dev_flags &
3389 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
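/*
 * get_eth_dcb_conf() (below) builds the port configuration for the two DCB
 * test modes: with DCB_VT_ENABLED it programs VMDq+DCB, mapping the 32 VLAN
 * tags in vlan_tags[] round-robin onto the VMDq pools and spreading the
 * user priorities over the requested traffic classes; otherwise it programs
 * plain DCB, keeping the current RSS hash configuration, and distributes
 * the user priorities the same way.
 */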
3394 const uint16_t vlan_tags[] = {
3395 0, 1, 2, 3, 4, 5, 6, 7,
3396 8, 9, 10, 11, 12, 13, 14, 15,
3397 16, 17, 18, 19, 20, 21, 22, 23,
3398 24, 25, 26, 27, 28, 29, 30, 31
3402 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3403 enum dcb_mode_enable dcb_mode,
3404 enum rte_eth_nb_tcs num_tcs,
3409 struct rte_eth_rss_conf rss_conf;
3412 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3413 * given above, and the number of traffic classes available for use.
3415 if (dcb_mode == DCB_VT_ENABLED) {
3416 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3417 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3418 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3419 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3421 /* VMDQ+DCB RX and TX configurations */
3422 vmdq_rx_conf->enable_default_pool = 0;
3423 vmdq_rx_conf->default_pool = 0;
3424 vmdq_rx_conf->nb_queue_pools =
3425 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3426 vmdq_tx_conf->nb_queue_pools =
3427 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3429 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3430 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3431 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3432 vmdq_rx_conf->pool_map[i].pools =
3433 1 << (i % vmdq_rx_conf->nb_queue_pools);
3435 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3436 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3437 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3440 /* set DCB mode of RX and TX of multiple queues */
3441 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
3442 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3444 struct rte_eth_dcb_rx_conf *rx_conf =
3445 &eth_conf->rx_adv_conf.dcb_rx_conf;
3446 struct rte_eth_dcb_tx_conf *tx_conf =
3447 &eth_conf->tx_adv_conf.dcb_tx_conf;
3449 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3453 rx_conf->nb_tcs = num_tcs;
3454 tx_conf->nb_tcs = num_tcs;
3456 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3457 rx_conf->dcb_tc[i] = i % num_tcs;
3458 tx_conf->dcb_tc[i] = i % num_tcs;
3461 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
3462 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3463 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3467 eth_conf->dcb_capability_en =
3468 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3470 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3476 init_port_dcb_config(portid_t pid,
3477 enum dcb_mode_enable dcb_mode,
3478 enum rte_eth_nb_tcs num_tcs,
3481 struct rte_eth_conf port_conf;
3482 struct rte_port *rte_port;
3486 rte_port = &ports[pid];
3488 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3489 /* Enter DCB configuration status */
3492 port_conf.rxmode = rte_port->dev_conf.rxmode;
3493 port_conf.txmode = rte_port->dev_conf.txmode;
3495 /* Set configuration of DCB in VT mode and DCB in non-VT mode */
3496 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3499 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3501 /* Re-configure the device. */
3502 retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
3506 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3510 /* If dev_info.vmdq_pool_base is greater than 0,
3511 * the queue ids of the VMDq pools start after the PF queues.
3513 if (dcb_mode == DCB_VT_ENABLED &&
3514 rte_port->dev_info.vmdq_pool_base > 0) {
3515 printf("VMDQ_DCB multi-queue mode is nonsensical"
3516 " for port %d.", pid);
3520 /* Assume the ports in testpmd have the same dcb capability
3521 * and the same number of rxq and txq in DCB mode
3523 if (dcb_mode == DCB_VT_ENABLED) {
3524 if (rte_port->dev_info.max_vfs > 0) {
3525 nb_rxq = rte_port->dev_info.nb_rx_queues;
3526 nb_txq = rte_port->dev_info.nb_tx_queues;
3528 nb_rxq = rte_port->dev_info.max_rx_queues;
3529 nb_txq = rte_port->dev_info.max_tx_queues;
3532 /* If VT is disabled, use all PF queues */
3533 if (rte_port->dev_info.vmdq_pool_base == 0) {
3534 nb_rxq = rte_port->dev_info.max_rx_queues;
3535 nb_txq = rte_port->dev_info.max_tx_queues;
3537 nb_rxq = (queueid_t)num_tcs;
3538 nb_txq = (queueid_t)num_tcs;
3542 rx_free_thresh = 64;
3544 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3546 rxtx_port_config(rte_port);
3548 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3549 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3550 rx_vft_set(pid, vlan_tags[i], 1);
3552 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3556 map_port_queue_stats_mapping_registers(pid, rte_port);
3558 rte_port->dcb_flag = 1;
3566 /* Configuration of Ethernet ports. */
3567 ports = rte_zmalloc("testpmd: ports",
3568 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3569 RTE_CACHE_LINE_SIZE);
3570 if (ports == NULL) {
3571 rte_exit(EXIT_FAILURE,
3572 "rte_zmalloc(%d struct rte_port) failed\n",
3576 /* Initialize ports NUMA structures */
3577 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3578 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3579 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3593 const char clr[] = { 27, '[', '2', 'J', '\0' };
3594 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3596 /* Clear screen and move to top left */
3597 printf("%s%s", clr, top_left);
3599 printf("\nPort statistics ====================================");
3600 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3601 nic_stats_display(fwd_ports_ids[i]);
3607 signal_handler(int signum)
3609 if (signum == SIGINT || signum == SIGTERM) {
3610 printf("\nSignal %d received, preparing to exit...\n",
3612 #ifdef RTE_LIBRTE_PDUMP
3613 /* uninitialize packet capture framework */
3616 #ifdef RTE_LIBRTE_LATENCY_STATS
3617 if (latencystats_enabled != 0)
3618 rte_latencystats_uninit();
3621 /* Set flag to indicate forced termination. */
3623 /* exit with the expected status */
3624 signal(signum, SIG_DFL);
3625 kill(getpid(), signum);
3630 main(int argc, char** argv)
3637 signal(SIGINT, signal_handler);
3638 signal(SIGTERM, signal_handler);
3640 testpmd_logtype = rte_log_register("testpmd");
3641 if (testpmd_logtype < 0)
3642 rte_exit(EXIT_FAILURE, "Cannot register log type");
3643 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3645 diag = rte_eal_init(argc, argv);
3647 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3648 rte_strerror(rte_errno));
3650 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3651 rte_exit(EXIT_FAILURE,
3652 "Secondary process type not supported.\n");
3654 ret = register_eth_event_callback();
3656 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3658 #ifdef RTE_LIBRTE_PDUMP
3659 /* initialize packet capture framework */
3664 RTE_ETH_FOREACH_DEV(port_id) {
3665 ports_ids[count] = port_id;
3668 nb_ports = (portid_t) count;
3670 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3672 /* allocate port structures, and init them */
3675 set_def_fwd_config();
3677 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3678 "Check the core mask argument\n");
3680 /* Bitrate/latency stats disabled by default */
3681 #ifdef RTE_LIBRTE_BITRATE
3682 bitrate_enabled = 0;
3684 #ifdef RTE_LIBRTE_LATENCY_STATS
3685 latencystats_enabled = 0;
3688 /* on FreeBSD, mlockall() is disabled by default */
3689 #ifdef RTE_EXEC_ENV_FREEBSD
3698 launch_args_parse(argc, argv);
3700 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3701 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3705 if (tx_first && interactive)
3706 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3707 "interactive mode.\n");
3709 if (tx_first && lsc_interrupt) {
3710 printf("Warning: lsc_interrupt needs to be off when "
3711 " using tx_first. Disabling.\n");
3715 if (!nb_rxq && !nb_txq)
3716 printf("Warning: Either rx or tx queues should be non-zero\n");
3718 if (nb_rxq > 1 && nb_rxq > nb_txq)
3719 printf("Warning: nb_rxq=%d enables RSS configuration, "
3720 "but nb_txq=%d will prevent to fully test it.\n",
3726 ret = rte_dev_hotplug_handle_enable();
3729 "fail to enable hotplug handling.");
3733 ret = rte_dev_event_monitor_start();
3736 "fail to start device event monitoring.");
3740 ret = rte_dev_event_callback_register(NULL,
3741 dev_event_callback, NULL);
3744 "fail to register device event callback\n");
3749 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3750 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3752 /* set all ports to promiscuous mode by default */
3753 RTE_ETH_FOREACH_DEV(port_id) {
3754 ret = rte_eth_promiscuous_enable(port_id);
3756 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3757 port_id, rte_strerror(-ret));
3760 /* Init metrics library */
3761 rte_metrics_init(rte_socket_id());
3763 #ifdef RTE_LIBRTE_LATENCY_STATS
3764 if (latencystats_enabled != 0) {
3765 int ret = rte_latencystats_init(1, NULL);
3767 printf("Warning: latencystats init()"
3768 " returned error %d\n", ret);
3769 printf("Latencystats running on lcore %d\n",
3770 latencystats_lcore_id);
3774 /* Setup bitrate stats */
3775 #ifdef RTE_LIBRTE_BITRATE
3776 if (bitrate_enabled != 0) {
3777 bitrate_data = rte_stats_bitrate_create();
3778 if (bitrate_data == NULL)
3779 rte_exit(EXIT_FAILURE,
3780 "Could not allocate bitrate data.\n");
3781 rte_stats_bitrate_reg(bitrate_data);
3785 #ifdef RTE_LIBRTE_CMDLINE
3786 if (strlen(cmdline_filename) != 0)
3787 cmdline_read_from_file(cmdline_filename);
3789 if (interactive == 1) {
3791 printf("Start automatic packet forwarding\n");
3792 start_packet_forwarding(0);
3804 printf("No commandline core given, start packet forwarding\n");
3805 start_packet_forwarding(tx_first);
3806 if (stats_period != 0) {
3807 uint64_t prev_time = 0, cur_time, diff_time = 0;
3808 uint64_t timer_period;
3810 /* Convert to number of cycles */
3811 timer_period = stats_period * rte_get_timer_hz();
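/*
 * Example: with --stats-period 2 and a 2.5 GHz timer frequency (purely
 * illustrative), timer_period becomes 2 * 2500000000 = 5e9 cycles.
 */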
3813 while (f_quit == 0) {
3814 cur_time = rte_get_timer_cycles();
3815 diff_time += cur_time - prev_time;
3817 if (diff_time >= timer_period) {
3819 /* Reset the timer */
3822 /* Sleep to avoid unnecessary checks */
3823 prev_time = cur_time;
3828 printf("Press enter to exit\n");
3829 rc = read(0, &c, 1);
3835 ret = rte_eal_cleanup();
3837 rte_exit(EXIT_FAILURE,
3838 "EAL cleanup failed: %s\n", strerror(-ret));
3840 return EXIT_SUCCESS;