1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
67 /* FreeBSD does not have MAP_HUGETLB, so define a fallback value */
68 #define HUGE_FLAG (0x40000)
70 #define HUGE_FLAG MAP_HUGETLB
73 #ifndef MAP_HUGE_SHIFT
74 /* older kernels (or FreeBSD) will not have this define */
75 #define HUGE_SHIFT (26)
77 #define HUGE_SHIFT MAP_HUGE_SHIFT
80 #define EXTMEM_HEAP_NAME "extmem"
81 #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* Use the master core for the command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports, among CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
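/*
 * Illustrative sketch (not part of testpmd): NUMA placement can be tuned
 * from the command line, e.g. (option syntax per the testpmd user guide,
 * values hypothetical):
 *   testpmd -l 0-3 -n 4 -- --port-numa-config="(0,0),(1,1)" \
 *           --ring-numa-config="(0,3,0),(1,3,1)"
 * or NUMA support can be disabled entirely with --no-numa.
 */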
102 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
117 * Store the sockets on which the memory pool used by each port
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123 * Store the sockets on which the RX ring used by each port
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129 * Store the sockets on which the TX ring used by each port
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet address of peer target ports to which packets are
137 * Must be instantiated with the Ethernet addresses of the peer traffic generator
140 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
182 #ifdef RTE_LIBRTE_IEEE1588
183 &ieee1588_fwd_engine,
188 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
189 uint16_t mempool_flags;
191 struct fwd_config cur_fwd_config;
192 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193 uint32_t retry_enabled;
194 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
197 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
199 * specified on command-line. */
200 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
203 * In a container, the process running with the 'stats-period' option cannot
204 * be terminated. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
209 * Configuration of packet segments used by the "txonly" processing engine.
211 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213 TXONLY_DEF_PACKET_LEN,
215 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
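/*
 * Illustrative example (hypothetical values): the interactive command
 * "set txpkts 64,64" would yield tx_pkt_nb_segs = 2,
 * tx_pkt_seg_lengths = {64, 64} and a total tx_pkt_length of 128 bytes
 * per TXONLY packet.
 */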
217 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
218 /**< Split policy for packets to TX. */
220 uint8_t txonly_multi_flow;
221 /**< Whether multiple flows are generated in TXONLY mode. */
223 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
224 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
226 /* Whether the current configuration is in DCB mode; 0 means it is not */
227 uint8_t dcb_config = 0;
229 /* Whether DCB is in testing status */
230 uint8_t dcb_test = 0;
233 * Configurable number of RX/TX queues.
235 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
236 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
237 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
240 * Configurable number of RX/TX ring descriptors.
241 * Defaults are supplied by drivers via ethdev.
243 #define RTE_TEST_RX_DESC_DEFAULT 0
244 #define RTE_TEST_TX_DESC_DEFAULT 0
245 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
246 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
248 #define RTE_PMD_PARAM_UNSET -1
250 * Configurable values of RX and TX ring threshold registers.
253 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
254 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
255 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
257 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
258 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
259 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
262 * Configurable value of RX free threshold.
264 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
267 * Configurable value of RX drop enable.
269 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
272 * Configurable value of TX free threshold.
274 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
277 * Configurable value of TX RS bit threshold.
279 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
282 * Configurable value of buffered packets before sending.
284 uint16_t noisy_tx_sw_bufsz;
287 * Configurable value of packet buffer timeout.
289 uint16_t noisy_tx_sw_buf_flush_time;
292 * Configurable value for size of VNF internal memory area
293 * used for simulating noisy neighbour behaviour
295 uint64_t noisy_lkup_mem_sz;
298 * Configurable value of number of random writes done in
299 * VNF simulation memory area.
301 uint64_t noisy_lkup_num_writes;
304 * Configurable value of number of random reads done in
305 * VNF simulation memory area.
307 uint64_t noisy_lkup_num_reads;
310 * Configurable value of number of random reads/writes done in
311 * VNF simulation memory area.
313 uint64_t noisy_lkup_num_reads_writes;
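/*
 * Illustrative sketch: the noisy-neighbour knobs above are set from
 * command-line options such as --noisy-lkup-memory,
 * --noisy-lkup-num-writes, --noisy-lkup-num-reads and
 * --noisy-lkup-num-reads-writes (exact spellings per the testpmd user
 * guide; treated as assumptions here).
 */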
316 * Receive Side Scaling (RSS) configuration.
318 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
321 * Port topology configuration
323 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
326 * Avoid flushing all the RX streams before starting forwarding.
328 uint8_t no_flush_rx = 0; /* flush by default */
331 * Flow API isolated mode.
333 uint8_t flow_isolate_all;
336 * Avoid checking the link status when starting/stopping a port.
338 uint8_t no_link_check = 0; /* check by default */
341 * Don't automatically start all ports in interactive mode.
343 uint8_t no_device_start = 0;
346 * Enable link status change notification
348 uint8_t lsc_interrupt = 1; /* enabled by default */
351 * Enable device removal notification.
353 uint8_t rmv_interrupt = 1; /* enabled by default */
355 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
357 /* After attach, port setup is called on event or by iterator */
358 bool setup_on_probe_event = true;
360 /* Clear ptypes on port initialization. */
361 uint8_t clear_ptypes = true;
363 /* Pretty printing of ethdev events */
364 static const char * const eth_event_desc[] = {
365 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
366 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
367 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
368 [RTE_ETH_EVENT_INTR_RESET] = "reset",
369 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
370 [RTE_ETH_EVENT_IPSEC] = "IPsec",
371 [RTE_ETH_EVENT_MACSEC] = "MACsec",
372 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
373 [RTE_ETH_EVENT_NEW] = "device probed",
374 [RTE_ETH_EVENT_DESTROY] = "device released",
375 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
376 [RTE_ETH_EVENT_MAX] = NULL,
380 * Display or mask ethdev events.
381 * Defaults to all events except VF_MBOX.
383 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
384 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
385 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
386 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
387 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
388 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
389 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
390 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
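/*
 * Sketch (hypothetical helper, assuming the mask semantics above): an
 * event of a given type is printed only when its bit is set, e.g.
 *
 *   static inline int
 *   event_is_printed(enum rte_eth_event_type type)
 *   {
 *           return (event_print_mask & (UINT32_C(1) << type)) != 0;
 *   }
 */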
392 * Decide whether all memory is locked for performance.
397 * NIC bypass mode configuration options.
400 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
401 /* The NIC bypass watchdog timeout. */
402 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
406 #ifdef RTE_LIBRTE_LATENCY_STATS
409 * Set when latency stats are enabled on the command line
411 uint8_t latencystats_enabled;
414 * Lcore ID to serve latency statistics.
416 lcoreid_t latencystats_lcore_id = -1;
421 * Ethernet device configuration.
423 struct rte_eth_rxmode rx_mode = {
424 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
425 /**< Default maximum frame length. */
428 struct rte_eth_txmode tx_mode = {
429 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
432 struct rte_fdir_conf fdir_conf = {
433 .mode = RTE_FDIR_MODE_NONE,
434 .pballoc = RTE_FDIR_PBALLOC_64K,
435 .status = RTE_FDIR_REPORT_STATUS,
437 .vlan_tci_mask = 0xFFEF,
439 .src_ip = 0xFFFFFFFF,
440 .dst_ip = 0xFFFFFFFF,
443 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
444 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
446 .src_port_mask = 0xFFFF,
447 .dst_port_mask = 0xFFFF,
448 .mac_addr_byte_mask = 0xFF,
449 .tunnel_type_mask = 1,
450 .tunnel_id_mask = 0xFFFFFFFF,
455 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
457 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
458 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
460 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
461 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
463 uint16_t nb_tx_queue_stats_mappings = 0;
464 uint16_t nb_rx_queue_stats_mappings = 0;
467 * Display zero values for xstats by default; set xstats_hide_zero to hide them.
469 uint8_t xstats_hide_zero;
471 unsigned int num_sockets = 0;
472 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
474 #ifdef RTE_LIBRTE_BITRATE
475 /* Bitrate statistics */
476 struct rte_stats_bitrates *bitrate_data;
477 lcoreid_t bitrate_lcore_id;
478 uint8_t bitrate_enabled;
481 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
482 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
485 * Hexadecimal bitmask of the RX multi-queue modes that can be enabled.
487 enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
489 /* Forward function declarations */
490 static void setup_attached_port(portid_t pi);
491 static void map_port_queue_stats_mapping_registers(portid_t pi,
492 struct rte_port *port);
493 static void check_all_ports_link_status(uint32_t port_mask);
494 static int eth_event_callback(portid_t port_id,
495 enum rte_eth_event_type type,
496 void *param, void *ret_param);
497 static void dev_event_callback(const char *device_name,
498 enum rte_dev_event_type type,
502 * Check whether all the ports are started.
503 * If yes, return a positive value; if not, return zero.
505 static int all_ports_started(void);
507 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
508 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
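/* With the default constants this is 1518 - 4 = 1514 bytes. */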
510 /* Holds the registered mbuf dynamic flags names. */
511 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
514 * Helper function to check whether a socket id is new (not yet discovered).
515 * If it is new, return a positive value; otherwise return zero.
518 new_socket_id(unsigned int socket_id)
522 for (i = 0; i < num_sockets; i++) {
523 if (socket_ids[i] == socket_id)
530 * Set up the default configuration.
533 set_default_fwd_lcores_config(void)
537 unsigned int sock_num;
540 for (i = 0; i < RTE_MAX_LCORE; i++) {
541 if (!rte_lcore_is_enabled(i))
543 sock_num = rte_lcore_to_socket_id(i);
544 if (new_socket_id(sock_num)) {
545 if (num_sockets >= RTE_MAX_NUMA_NODES) {
546 rte_exit(EXIT_FAILURE,
547 "Total sockets greater than %u\n",
550 socket_ids[num_sockets++] = sock_num;
552 if (i == rte_get_master_lcore())
554 fwd_lcores_cpuids[nb_lc++] = i;
556 nb_lcores = (lcoreid_t) nb_lc;
557 nb_cfg_lcores = nb_lcores;
562 set_def_peer_eth_addrs(void)
566 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
567 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
568 peer_eth_addrs[i].addr_bytes[5] = i;
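/*
 * Illustrative result: RTE_ETHER_LOCAL_ADMIN_ADDR is 0x02, so the
 * default peer address of port 1 becomes 02:00:00:00:00:01.
 */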
573 set_default_fwd_ports_config(void)
578 RTE_ETH_FOREACH_DEV(pt_id) {
579 fwd_ports_ids[i++] = pt_id;
581 /* Update socket info according to the attached device */
582 int socket_id = rte_eth_dev_socket_id(pt_id);
583 if (socket_id >= 0 && new_socket_id(socket_id)) {
584 if (num_sockets >= RTE_MAX_NUMA_NODES) {
585 rte_exit(EXIT_FAILURE,
586 "Total sockets greater than %u\n",
589 socket_ids[num_sockets++] = socket_id;
593 nb_cfg_ports = nb_ports;
594 nb_fwd_ports = nb_ports;
598 set_def_fwd_config(void)
600 set_default_fwd_lcores_config();
601 set_def_peer_eth_addrs();
602 set_default_fwd_ports_config();
605 /* extremely pessimistic estimation of memory required to create a mempool */
607 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
609 unsigned int n_pages, mbuf_per_pg, leftover;
610 uint64_t total_mem, mbuf_mem, obj_sz;
612 /* there is no good way to predict how much space the mempool will
613 * occupy because it will allocate chunks on the fly, and some of those
614 * will come from default DPDK memory while some will come from our
615 * external memory, so just assume 128MB will be enough for everyone.
617 uint64_t hdr_mem = 128 << 20;
619 /* account for a possibly non-contiguous layout */
620 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
622 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
626 mbuf_per_pg = pgsz / obj_sz;
627 leftover = (nb_mbufs % mbuf_per_pg) > 0;
628 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
630 mbuf_mem = n_pages * pgsz;
632 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
634 if (total_mem > SIZE_MAX) {
635 TESTPMD_LOG(ERR, "Memory size too big\n");
638 *out = (size_t)total_mem;
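/*
 * Worked example (hypothetical numbers): for nb_mbufs = 131072, an
 * obj_sz of 2560 bytes and pgsz = 2 MB, mbuf_per_pg = 2 MB / 2560 = 819,
 * n_pages = ceil(131072 / 819) = 161, so mbuf_mem = 322 MB and the
 * result is RTE_ALIGN(128 MB + 322 MB, 2 MB) = 450 MB.
 */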
644 pagesz_flags(uint64_t page_sz)
646 /* as per the mmap() manpage, huge page sizes are encoded as the log2
647 * of the page size, shifted left by MAP_HUGE_SHIFT
649 int log2 = rte_log2_u64(page_sz);
651 return (log2 << HUGE_SHIFT);
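/*
 * Example: for a 2 MB page, rte_log2_u64(2 MB) = 21, so this returns
 * 21 << 26, matching the kernel's MAP_HUGE_2MB encoding.
 */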
655 alloc_mem(size_t memsz, size_t pgsz, bool huge)
660 /* allocate anonymous hugepages */
661 flags = MAP_ANONYMOUS | MAP_PRIVATE;
663 flags |= HUGE_FLAG | pagesz_flags(pgsz);
665 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
666 if (addr == MAP_FAILED)
672 struct extmem_param {
676 rte_iova_t *iova_table;
677 unsigned int iova_table_len;
681 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
684 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
685 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
686 unsigned int cur_page, n_pages, pgsz_idx;
687 size_t mem_sz, cur_pgsz;
688 rte_iova_t *iovas = NULL;
692 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
693 /* skip anything that is too big */
694 if (pgsizes[pgsz_idx] > SIZE_MAX)
697 cur_pgsz = pgsizes[pgsz_idx];
699 /* if we were told not to allocate hugepages, override */
701 cur_pgsz = sysconf(_SC_PAGESIZE);
703 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
705 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
709 /* allocate our memory */
710 addr = alloc_mem(mem_sz, cur_pgsz, huge);
712 /* if we couldn't allocate memory with a specified page size,
713 * that doesn't mean we can't do it with other page sizes, so
719 /* store IOVA addresses for every page in this memory area */
720 n_pages = mem_sz / cur_pgsz;
722 iovas = malloc(sizeof(*iovas) * n_pages);
725 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
728 /* lock memory if it's not huge pages */
732 /* populate IOVA addresses */
733 for (cur_page = 0; cur_page < n_pages; cur_page++) {
738 offset = cur_pgsz * cur_page;
739 cur = RTE_PTR_ADD(addr, offset);
741 /* touch the page before getting its IOVA */
742 *(volatile char *)cur = 0;
744 iova = rte_mem_virt2iova(cur);
746 iovas[cur_page] = iova;
751 /* if we couldn't allocate anything */
757 param->pgsz = cur_pgsz;
758 param->iova_table = iovas;
759 param->iova_table_len = n_pages;
766 munmap(addr, mem_sz);
772 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
774 struct extmem_param param;
777 memset(¶m, 0, sizeof(param));
779 /* check if our heap exists */
780 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
782 /* create our heap */
783 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
785 TESTPMD_LOG(ERR, "Cannot create heap\n");
790 ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge);
792 TESTPMD_LOG(ERR, "Cannot create memory area\n");
796 /* we now have a valid memory area, so add it to heap */
797 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
798 param.addr, param.len, param.iova_table,
799 param.iova_table_len, param.pgsz);
801 /* when using VFIO, memory is automatically mapped for DMA by EAL */
803 /* not needed any more */
804 free(param.iova_table);
807 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
808 munmap(param.addr, param.len);
814 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
820 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
821 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
826 RTE_ETH_FOREACH_DEV(pid) {
827 struct rte_eth_dev *dev =
828 &rte_eth_devices[pid];
830 ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
834 "unable to DMA unmap addr 0x%p "
836 memhdr->addr, dev->data->name);
839 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
842 "unable to un-register addr 0x%p\n", memhdr->addr);
847 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
848 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
851 size_t page_size = sysconf(_SC_PAGESIZE);
854 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
858 "unable to register addr 0x%p\n", memhdr->addr);
861 RTE_ETH_FOREACH_DEV(pid) {
862 struct rte_eth_dev *dev =
863 &rte_eth_devices[pid];
865 ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
869 "unable to DMA map addr 0x%p "
871 memhdr->addr, dev->data->name);
877 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
878 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
880 struct rte_pktmbuf_extmem *xmem;
881 unsigned int ext_num, zone_num, elt_num;
884 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
885 elt_num = EXTBUF_ZONE_SIZE / elt_size;
886 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
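/*
 * Illustrative arithmetic (hypothetical mbuf_sz): for mbuf_sz = 2176,
 * elt_size stays 2176 (already a cache-line multiple), elt_num =
 * 2 MB / 2176 = 963 buffers per zone, and zone_num = ceil(nb_mbufs / 963).
 */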
888 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
890 TESTPMD_LOG(ERR, "Cannot allocate memory for "
891 "external buffer descriptors\n");
895 for (ext_num = 0; ext_num < zone_num; ext_num++) {
896 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
897 const struct rte_memzone *mz;
898 char mz_name[RTE_MEMZONE_NAMESIZE];
901 ret = snprintf(mz_name, sizeof(mz_name),
902 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
903 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
904 errno = ENAMETOOLONG;
908 mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
910 RTE_MEMZONE_IOVA_CONTIG |
912 RTE_MEMZONE_SIZE_HINT_ONLY,
916 * The caller exits on external buffer creation
917 * error, so there is no need to free memzones.
923 xseg->buf_ptr = mz->addr;
924 xseg->buf_iova = mz->iova;
925 xseg->buf_len = EXTBUF_ZONE_SIZE;
926 xseg->elt_size = elt_size;
928 if (ext_num == 0 && xmem != NULL) {
937 * Configuration initialisation done once at init time.
939 static struct rte_mempool *
940 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
941 unsigned int socket_id)
943 char pool_name[RTE_MEMPOOL_NAMESIZE];
944 struct rte_mempool *rte_mp = NULL;
947 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
948 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
951 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
952 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
954 switch (mp_alloc_type) {
955 case MP_ALLOC_NATIVE:
957 /* wrapper to rte_mempool_create() */
958 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
959 rte_mbuf_best_mempool_ops());
960 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
961 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
966 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
967 mb_size, (unsigned int) mb_mempool_cache,
968 sizeof(struct rte_pktmbuf_pool_private),
969 socket_id, mempool_flags);
973 if (rte_mempool_populate_anon(rte_mp) == 0) {
974 rte_mempool_free(rte_mp);
978 rte_pktmbuf_pool_init(rte_mp, NULL);
979 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
980 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
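/* The anonymous memory populated above is unknown to the EAL, so
 * dma_map_cb() registers each memory chunk as external memory and
 * DMA-maps it for every probed device (see dma_map_cb() above).
 */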
984 case MP_ALLOC_XMEM_HUGE:
987 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
989 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
990 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
993 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
995 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
997 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
998 rte_mbuf_best_mempool_ops());
999 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1000 mb_mempool_cache, 0, mbuf_seg_size,
1006 struct rte_pktmbuf_extmem *ext_mem;
1007 unsigned int ext_num;
1009 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1010 socket_id, pool_name, &ext_mem);
1012 rte_exit(EXIT_FAILURE,
1013 "Can't create pinned data buffers\n");
1015 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1016 rte_mbuf_best_mempool_ops());
1017 rte_mp = rte_pktmbuf_pool_create_extbuf
1018 (pool_name, nb_mbuf, mb_mempool_cache,
1019 0, mbuf_seg_size, socket_id,
1026 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1031 if (rte_mp == NULL) {
1032 rte_exit(EXIT_FAILURE,
1033 "Creation of mbuf pool for socket %u failed: %s\n",
1034 socket_id, rte_strerror(rte_errno));
1035 } else if (verbose_level > 0) {
1036 rte_mempool_dump(stdout, rte_mp);
1042 * Check whether the given socket id is valid in NUMA mode;
1043 * return 0 if valid, -1 otherwise.
1046 check_socket_id(const unsigned int socket_id)
1048 static int warning_once = 0;
1050 if (new_socket_id(socket_id)) {
1051 if (!warning_once && numa_support)
1052 printf("Warning: NUMA should be configured manually by"
1053 " using --port-numa-config and"
1054 " --ring-numa-config parameters along with"
1063 * Get the allowed maximum number of RX queues.
1064 * *pid returns the port id which has the minimal value of
1065 * max_rx_queues among all ports.
1068 get_allowed_max_nb_rxq(portid_t *pid)
1070 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1071 bool max_rxq_valid = false;
1073 struct rte_eth_dev_info dev_info;
1075 RTE_ETH_FOREACH_DEV(pi) {
1076 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1079 max_rxq_valid = true;
1080 if (dev_info.max_rx_queues < allowed_max_rxq) {
1081 allowed_max_rxq = dev_info.max_rx_queues;
1085 return max_rxq_valid ? allowed_max_rxq : 0;
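/*
 * Illustrative example: with two ports whose max_rx_queues are 16 and 8,
 * this returns 8 and *pid is set to the id of the 8-queue port.
 */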
1089 * Check whether the input rxq is valid.
1090 * If the input rxq is not greater than the maximum number
1091 * of RX queues of any port, it is valid.
1092 * Return 0 if valid, -1 otherwise.
1095 check_nb_rxq(queueid_t rxq)
1097 queueid_t allowed_max_rxq;
1100 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1101 if (rxq > allowed_max_rxq) {
1102 printf("Fail: input rxq (%u) can't be greater "
1103 "than max_rx_queues (%u) of port %u\n",
1113 * Get the allowed maximum number of TX queues.
1114 * *pid returns the port id which has the minimal value of
1115 * max_tx_queues among all ports.
1118 get_allowed_max_nb_txq(portid_t *pid)
1120 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1121 bool max_txq_valid = false;
1123 struct rte_eth_dev_info dev_info;
1125 RTE_ETH_FOREACH_DEV(pi) {
1126 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1129 max_txq_valid = true;
1130 if (dev_info.max_tx_queues < allowed_max_txq) {
1131 allowed_max_txq = dev_info.max_tx_queues;
1135 return max_txq_valid ? allowed_max_txq : 0;
1139 * Check whether the input txq is valid.
1140 * If the input txq is not greater than the maximum number
1141 * of TX queues of any port, it is valid.
1142 * Return 0 if valid, -1 otherwise.
1145 check_nb_txq(queueid_t txq)
1147 queueid_t allowed_max_txq;
1150 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1151 if (txq > allowed_max_txq) {
1152 printf("Fail: input txq (%u) can't be greater "
1153 "than max_tx_queues (%u) of port %u\n",
1163 * Get the allowed maximum number of RXDs for every RX queue.
1164 * *pid returns the port id which has the minimal value of
1165 * max_rxd among all queues of all ports.
1168 get_allowed_max_nb_rxd(portid_t *pid)
1170 uint16_t allowed_max_rxd = UINT16_MAX;
1172 struct rte_eth_dev_info dev_info;
1174 RTE_ETH_FOREACH_DEV(pi) {
1175 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1178 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1179 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1183 return allowed_max_rxd;
1187 * Get the allowed minimal number of RXDs for every RX queue.
1188 * *pid returns the port id which has the maximal value of
1189 * min_rxd among all queues of all ports.
1192 get_allowed_min_nb_rxd(portid_t *pid)
1194 uint16_t allowed_min_rxd = 0;
1196 struct rte_eth_dev_info dev_info;
1198 RTE_ETH_FOREACH_DEV(pi) {
1199 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1202 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1203 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1208 return allowed_min_rxd;
1212 * Check whether the input rxd is valid.
1213 * If the input rxd is not greater than the maximum number
1214 * of RXDs of any RX queue and is not less than the
1215 * minimal number of RXDs of any RX queue, it is valid.
1216 * Return 0 if valid, -1 otherwise.
1219 check_nb_rxd(queueid_t rxd)
1221 uint16_t allowed_max_rxd;
1222 uint16_t allowed_min_rxd;
1225 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1226 if (rxd > allowed_max_rxd) {
1227 printf("Fail: input rxd (%u) can't be greater "
1228 "than max_rxds (%u) of port %u\n",
1235 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1236 if (rxd < allowed_min_rxd) {
1237 printf("Fail: input rxd (%u) can't be less "
1238 "than min_rxds (%u) of port %u\n",
1249 * Get the allowed maximum number of TXDs for every TX queue.
1250 * *pid returns the port id which has the minimal value of
1251 * max_txd among all queues of all ports.
1254 get_allowed_max_nb_txd(portid_t *pid)
1256 uint16_t allowed_max_txd = UINT16_MAX;
1258 struct rte_eth_dev_info dev_info;
1260 RTE_ETH_FOREACH_DEV(pi) {
1261 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1264 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1265 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1269 return allowed_max_txd;
1273 * Get the allowed minimal number of TXDs for every TX queue.
1274 * *pid returns the port id which has the maximal value of
1275 * min_txd among all queues of all ports.
1278 get_allowed_min_nb_txd(portid_t *pid)
1280 uint16_t allowed_min_txd = 0;
1282 struct rte_eth_dev_info dev_info;
1284 RTE_ETH_FOREACH_DEV(pi) {
1285 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1288 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1289 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1294 return allowed_min_txd;
1298 * Check whether the input txd is valid.
1299 * If the input txd is not greater than the maximum number
1300 * of TXDs of any TX queue and is not less than the minimal
1301 * number of TXDs of any TX queue, it is valid. Return 0 if valid, -1 otherwise.
1304 check_nb_txd(queueid_t txd)
1306 uint16_t allowed_max_txd;
1307 uint16_t allowed_min_txd;
1310 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1311 if (txd > allowed_max_txd) {
1312 printf("Fail: input txd (%u) can't be greater "
1313 "than max_txds (%u) of port %u\n",
1320 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1321 if (txd < allowed_min_txd) {
1322 printf("Fail: input txd (%u) can't be less "
1323 "than min_txds (%u) of port %u\n",
1334 * Get the allowed maximum number of hairpin queues.
1335 * *pid returns the port id which has the minimal value of
1336 * max_hairpin_queues among all ports.
1339 get_allowed_max_nb_hairpinq(portid_t *pid)
1341 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1343 struct rte_eth_hairpin_cap cap;
1345 RTE_ETH_FOREACH_DEV(pi) {
1346 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1350 if (cap.max_nb_queues < allowed_max_hairpinq) {
1351 allowed_max_hairpinq = cap.max_nb_queues;
1355 return allowed_max_hairpinq;
1359 * Check whether the input hairpinq is valid.
1360 * If the input hairpinq is not greater than the maximum number
1361 * of hairpin queues of any port, it is valid.
1362 * Return 0 if valid, -1 otherwise.
1365 check_nb_hairpinq(queueid_t hairpinq)
1367 queueid_t allowed_max_hairpinq;
1370 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1371 if (hairpinq > allowed_max_hairpinq) {
1372 printf("Fail: input hairpin (%u) can't be greater "
1373 "than max_hairpin_queues (%u) of port %u\n",
1374 hairpinq, allowed_max_hairpinq, pid);
1384 struct rte_port *port;
1385 struct rte_mempool *mbp;
1386 unsigned int nb_mbuf_per_pool;
1388 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1389 struct rte_gro_param gro_param;
1396 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1398 /* Configuration of logical cores. */
1399 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1400 sizeof(struct fwd_lcore *) * nb_lcores,
1401 RTE_CACHE_LINE_SIZE);
1402 if (fwd_lcores == NULL) {
1403 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1404 "failed\n", nb_lcores);
1406 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1407 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1408 sizeof(struct fwd_lcore),
1409 RTE_CACHE_LINE_SIZE);
1410 if (fwd_lcores[lc_id] == NULL) {
1411 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1414 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1417 RTE_ETH_FOREACH_DEV(pid) {
1419 /* Apply default TxRx configuration for all ports */
1420 port->dev_conf.txmode = tx_mode;
1421 port->dev_conf.rxmode = rx_mode;
1423 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1425 rte_exit(EXIT_FAILURE,
1426 "rte_eth_dev_info_get() failed\n");
1428 if (!(port->dev_info.tx_offload_capa &
1429 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1430 port->dev_conf.txmode.offloads &=
1431 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1433 if (port_numa[pid] != NUMA_NO_CONFIG)
1434 port_per_socket[port_numa[pid]]++;
1436 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1439 * if socket_id is invalid,
1440 * set to the first available socket.
1442 if (check_socket_id(socket_id) < 0)
1443 socket_id = socket_ids[0];
1444 port_per_socket[socket_id]++;
1448 /* Apply Rx offloads configuration */
1449 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1450 port->rx_conf[k].offloads =
1451 port->dev_conf.rxmode.offloads;
1452 /* Apply Tx offloads configuration */
1453 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1454 port->tx_conf[k].offloads =
1455 port->dev_conf.txmode.offloads;
1457 /* set flag to initialize port/queue */
1458 port->need_reconfig = 1;
1459 port->need_reconfig_queues = 1;
1460 port->tx_metadata = 0;
1462 /* Check the maximum number of segments per MTU and
1463 * update the mbuf data size accordingly.
1465 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1466 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1467 data_size = rx_mode.max_rx_pkt_len /
1468 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1470 if ((data_size + RTE_PKTMBUF_HEADROOM) >
1472 mbuf_data_size = data_size +
1473 RTE_PKTMBUF_HEADROOM;
1480 TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
1484 * Create pools of mbuf.
1485 * If NUMA support is disabled, create a single pool of mbuf in
1486 * socket 0 memory by default.
1487 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1489 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1490 * nb_txd can be configured at run time.
1492 if (param_total_num_mbufs)
1493 nb_mbuf_per_pool = param_total_num_mbufs;
1495 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1496 (nb_lcores * mb_mempool_cache) +
1497 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1498 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1504 for (i = 0; i < num_sockets; i++)
1505 mempools[i] = mbuf_pool_create(mbuf_data_size,
1509 if (socket_num == UMA_NO_CONFIG)
1510 mempools[0] = mbuf_pool_create(mbuf_data_size,
1511 nb_mbuf_per_pool, 0);
1513 mempools[socket_num] = mbuf_pool_create
1521 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1522 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1524 * Record which mbuf pool each logical core should use, if needed.
1526 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1527 mbp = mbuf_pool_find(
1528 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1531 mbp = mbuf_pool_find(0);
1532 fwd_lcores[lc_id]->mbp = mbp;
1533 /* initialize GSO context */
1534 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1535 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1536 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1537 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1539 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1542 /* Configuration of packet forwarding streams. */
1543 if (init_fwd_streams() < 0)
1544 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1548 /* create a gro context for each lcore */
1549 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1550 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1551 gro_param.max_item_per_flow = MAX_PKT_BURST;
1552 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1553 gro_param.socket_id = rte_lcore_to_socket_id(
1554 fwd_lcores_cpuids[lc_id]);
1555 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1556 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1557 rte_exit(EXIT_FAILURE,
1558 "rte_gro_ctx_create() failed\n");
1565 reconfig(portid_t new_port_id, unsigned socket_id)
1567 struct rte_port *port;
1570 /* Reconfiguration of Ethernet ports. */
1571 port = &ports[new_port_id];
1573 ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
1577 /* set flag to initialize port/queue */
1578 port->need_reconfig = 1;
1579 port->need_reconfig_queues = 1;
1580 port->socket_id = socket_id;
1587 init_fwd_streams(void)
1590 struct rte_port *port;
1591 streamid_t sm_id, nb_fwd_streams_new;
1594 /* Set the socket id according to whether NUMA is enabled */
1595 RTE_ETH_FOREACH_DEV(pid) {
1597 if (nb_rxq > port->dev_info.max_rx_queues) {
1598 printf("Fail: nb_rxq(%d) is greater than "
1599 "max_rx_queues(%d)\n", nb_rxq,
1600 port->dev_info.max_rx_queues);
1603 if (nb_txq > port->dev_info.max_tx_queues) {
1604 printf("Fail: nb_txq(%d) is greater than "
1605 "max_tx_queues(%d)\n", nb_txq,
1606 port->dev_info.max_tx_queues);
1610 if (port_numa[pid] != NUMA_NO_CONFIG)
1611 port->socket_id = port_numa[pid];
1613 port->socket_id = rte_eth_dev_socket_id(pid);
1616 * if socket_id is invalid,
1617 * set to the first available socket.
1619 if (check_socket_id(port->socket_id) < 0)
1620 port->socket_id = socket_ids[0];
1624 if (socket_num == UMA_NO_CONFIG)
1625 port->socket_id = 0;
1627 port->socket_id = socket_num;
1631 q = RTE_MAX(nb_rxq, nb_txq);
1633 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1636 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1637 if (nb_fwd_streams_new == nb_fwd_streams)
1640 if (fwd_streams != NULL) {
1641 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1642 if (fwd_streams[sm_id] == NULL)
1644 rte_free(fwd_streams[sm_id]);
1645 fwd_streams[sm_id] = NULL;
1647 rte_free(fwd_streams);
1652 nb_fwd_streams = nb_fwd_streams_new;
1653 if (nb_fwd_streams) {
1654 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1655 sizeof(struct fwd_stream *) * nb_fwd_streams,
1656 RTE_CACHE_LINE_SIZE);
1657 if (fwd_streams == NULL)
1658 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1659 " (struct fwd_stream *)) failed\n",
1662 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1663 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1664 " struct fwd_stream", sizeof(struct fwd_stream),
1665 RTE_CACHE_LINE_SIZE);
1666 if (fwd_streams[sm_id] == NULL)
1667 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1668 "(struct fwd_stream) failed\n");
1675 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1677 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1679 uint64_t total_burst, sburst;
1681 uint64_t burst_stats[4];
1682 uint16_t pktnb_stats[4];
1684 int burst_percent[4], sburstp;
1688 * First compute the total number of packet bursts and the
1689 * two highest numbers of bursts of the same number of packets.
1691 memset(&burst_stats, 0x0, sizeof(burst_stats));
1692 memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1694 /* Show stats for 0 burst size always */
1695 total_burst = pbs->pkt_burst_spread[0];
1696 burst_stats[0] = pbs->pkt_burst_spread[0];
1699 /* Find the next 2 burst sizes with highest occurrences. */
1700 for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1701 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1706 total_burst += nb_burst;
1708 if (nb_burst > burst_stats[1]) {
1709 burst_stats[2] = burst_stats[1];
1710 pktnb_stats[2] = pktnb_stats[1];
1711 burst_stats[1] = nb_burst;
1712 pktnb_stats[1] = nb_pkt;
1713 } else if (nb_burst > burst_stats[2]) {
1714 burst_stats[2] = nb_burst;
1715 pktnb_stats[2] = nb_pkt;
1718 if (total_burst == 0)
1721 printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1722 for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1724 printf("%d%% of other]\n", 100 - sburstp);
1728 sburst += burst_stats[i];
1729 if (sburst == total_burst) {
1730 printf("%d%% of %d pkts]\n",
1731 100 - sburstp, (int) pktnb_stats[i]);
1736 (double)burst_stats[i] / total_burst * 100;
1737 printf("%d%% of %d pkts + ",
1738 burst_percent[i], (int) pktnb_stats[i]);
1739 sburstp += burst_percent[i];
1742 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1745 fwd_stream_stats_display(streamid_t stream_id)
1747 struct fwd_stream *fs;
1748 static const char *fwd_top_stats_border = "-------";
1750 fs = fwd_streams[stream_id];
1751 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1752 (fs->fwd_dropped == 0))
1754 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1755 "TX Port=%2d/Queue=%2d %s\n",
1756 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1757 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1758 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1759 " TX-dropped: %-14"PRIu64,
1760 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1762 /* if checksum mode */
1763 if (cur_fwd_eng == &csum_fwd_engine) {
1764 printf(" RX-bad IP checksum: %-14"PRIu64
1765 " RX-bad L4 checksum: %-14"PRIu64
1766 " RX-bad outer L4 checksum: %-14"PRIu64"\n",
1767 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1768 fs->rx_bad_outer_l4_csum);
1773 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1774 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1775 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1780 fwd_stats_display(void)
1782 static const char *fwd_stats_border = "----------------------";
1783 static const char *acc_stats_border = "+++++++++++++++";
1785 struct fwd_stream *rx_stream;
1786 struct fwd_stream *tx_stream;
1787 uint64_t tx_dropped;
1788 uint64_t rx_bad_ip_csum;
1789 uint64_t rx_bad_l4_csum;
1790 uint64_t rx_bad_outer_l4_csum;
1791 } ports_stats[RTE_MAX_ETHPORTS];
1792 uint64_t total_rx_dropped = 0;
1793 uint64_t total_tx_dropped = 0;
1794 uint64_t total_rx_nombuf = 0;
1795 struct rte_eth_stats stats;
1796 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1797 uint64_t fwd_cycles = 0;
1799 uint64_t total_recv = 0;
1800 uint64_t total_xmit = 0;
1801 struct rte_port *port;
1806 memset(ports_stats, 0, sizeof(ports_stats));
1808 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1809 struct fwd_stream *fs = fwd_streams[sm_id];
1811 if (cur_fwd_config.nb_fwd_streams >
1812 cur_fwd_config.nb_fwd_ports) {
1813 fwd_stream_stats_display(sm_id);
1815 ports_stats[fs->tx_port].tx_stream = fs;
1816 ports_stats[fs->rx_port].rx_stream = fs;
1819 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1821 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1822 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1823 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1824 fs->rx_bad_outer_l4_csum;
1826 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1827 fwd_cycles += fs->core_cycles;
1830 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1833 pt_id = fwd_ports_ids[i];
1834 port = &ports[pt_id];
1836 rte_eth_stats_get(pt_id, &stats);
1837 stats.ipackets -= port->stats.ipackets;
1838 stats.opackets -= port->stats.opackets;
1839 stats.ibytes -= port->stats.ibytes;
1840 stats.obytes -= port->stats.obytes;
1841 stats.imissed -= port->stats.imissed;
1842 stats.oerrors -= port->stats.oerrors;
1843 stats.rx_nombuf -= port->stats.rx_nombuf;
1845 total_recv += stats.ipackets;
1846 total_xmit += stats.opackets;
1847 total_rx_dropped += stats.imissed;
1848 total_tx_dropped += ports_stats[pt_id].tx_dropped;
1849 total_tx_dropped += stats.oerrors;
1850 total_rx_nombuf += stats.rx_nombuf;
1852 printf("\n %s Forward statistics for port %-2d %s\n",
1853 fwd_stats_border, pt_id, fwd_stats_border);
1855 if (!port->rx_queue_stats_mapping_enabled &&
1856 !port->tx_queue_stats_mapping_enabled) {
1857 printf(" RX-packets: %-14"PRIu64
1858 " RX-dropped: %-14"PRIu64
1859 "RX-total: %-"PRIu64"\n",
1860 stats.ipackets, stats.imissed,
1861 stats.ipackets + stats.imissed);
1863 if (cur_fwd_eng == &csum_fwd_engine)
1864 printf(" Bad-ipcsum: %-14"PRIu64
1865 " Bad-l4csum: %-14"PRIu64
1866 "Bad-outer-l4csum: %-14"PRIu64"\n",
1867 ports_stats[pt_id].rx_bad_ip_csum,
1868 ports_stats[pt_id].rx_bad_l4_csum,
1869 ports_stats[pt_id].rx_bad_outer_l4_csum);
1870 if (stats.ierrors + stats.rx_nombuf > 0) {
1871 printf(" RX-error: %-"PRIu64"\n",
1873 printf(" RX-nombufs: %-14"PRIu64"\n",
1877 printf(" TX-packets: %-14"PRIu64
1878 " TX-dropped: %-14"PRIu64
1879 "TX-total: %-"PRIu64"\n",
1880 stats.opackets, ports_stats[pt_id].tx_dropped,
1881 stats.opackets + ports_stats[pt_id].tx_dropped);
1883 printf(" RX-packets: %14"PRIu64
1884 " RX-dropped:%14"PRIu64
1885 " RX-total:%14"PRIu64"\n",
1886 stats.ipackets, stats.imissed,
1887 stats.ipackets + stats.imissed);
1889 if (cur_fwd_eng == &csum_fwd_engine)
1890 printf(" Bad-ipcsum:%14"PRIu64
1891 " Bad-l4csum:%14"PRIu64
1892 " Bad-outer-l4csum: %-14"PRIu64"\n",
1893 ports_stats[pt_id].rx_bad_ip_csum,
1894 ports_stats[pt_id].rx_bad_l4_csum,
1895 ports_stats[pt_id].rx_bad_outer_l4_csum);
1896 if ((stats.ierrors + stats.rx_nombuf) > 0) {
1897 printf(" RX-error:%"PRIu64"\n", stats.ierrors);
1898 printf(" RX-nombufs: %14"PRIu64"\n",
1902 printf(" TX-packets: %14"PRIu64
1903 " TX-dropped:%14"PRIu64
1904 " TX-total:%14"PRIu64"\n",
1905 stats.opackets, ports_stats[pt_id].tx_dropped,
1906 stats.opackets + ports_stats[pt_id].tx_dropped);
1909 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1910 if (ports_stats[pt_id].rx_stream)
1911 pkt_burst_stats_display("RX",
1912 &ports_stats[pt_id].rx_stream->rx_burst_stats);
1913 if (ports_stats[pt_id].tx_stream)
1914 pkt_burst_stats_display("TX",
1915 &ports_stats[pt_id].tx_stream->tx_burst_stats);
1918 if (port->rx_queue_stats_mapping_enabled) {
1920 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1921 printf(" Stats reg %2d RX-packets:%14"PRIu64
1922 " RX-errors:%14"PRIu64
1923 " RX-bytes:%14"PRIu64"\n",
1924 j, stats.q_ipackets[j],
1925 stats.q_errors[j], stats.q_ibytes[j]);
1929 if (port->tx_queue_stats_mapping_enabled) {
1930 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1931 printf(" Stats reg %2d TX-packets:%14"PRIu64
1934 j, stats.q_opackets[j],
1939 printf(" %s--------------------------------%s\n",
1940 fwd_stats_border, fwd_stats_border);
1943 printf("\n %s Accumulated forward statistics for all ports"
1945 acc_stats_border, acc_stats_border);
1946 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1948 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1950 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1951 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1952 if (total_rx_nombuf > 0)
1953 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1954 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1956 acc_stats_border, acc_stats_border);
1957 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1958 #define CYC_PER_MHZ 1E6
1959 if (total_recv > 0 || total_xmit > 0) {
1960 uint64_t total_pkts = 0;
1961 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
1962 strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
1963 total_pkts = total_xmit;
1965 total_pkts = total_recv;
1967 printf("\n CPU cycles/packet=%.2F (total cycles="
1968 "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
1970 (double) fwd_cycles / total_pkts,
1971 fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
1972 (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
1978 fwd_stats_reset(void)
1984 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1985 pt_id = fwd_ports_ids[i];
1986 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
1988 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1989 struct fwd_stream *fs = fwd_streams[sm_id];
1993 fs->fwd_dropped = 0;
1994 fs->rx_bad_ip_csum = 0;
1995 fs->rx_bad_l4_csum = 0;
1996 fs->rx_bad_outer_l4_csum = 0;
1998 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1999 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
2000 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2002 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
2003 fs->core_cycles = 0;
2009 flush_fwd_rx_queues(void)
2011 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2018 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2019 uint64_t timer_period;
2021 /* convert to number of cycles */
2022 timer_period = rte_get_timer_hz(); /* 1 second timeout */
2024 for (j = 0; j < 2; j++) {
2025 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2026 for (rxq = 0; rxq < nb_rxq; rxq++) {
2027 port_id = fwd_ports_ids[rxp];
2029 * testpmd can get stuck in the do-while loop below
2030 * if rte_eth_rx_burst() always returns a nonzero
2031 * number of packets, so a timer is added to exit this
2032 * loop after the 1-second timer expires.
2034 prev_tsc = rte_rdtsc();
2036 nb_rx = rte_eth_rx_burst(port_id, rxq,
2037 pkts_burst, MAX_PKT_BURST);
2038 for (i = 0; i < nb_rx; i++)
2039 rte_pktmbuf_free(pkts_burst[i]);
2041 cur_tsc = rte_rdtsc();
2042 diff_tsc = cur_tsc - prev_tsc;
2043 timer_tsc += diff_tsc;
2044 } while ((nb_rx > 0) &&
2045 (timer_tsc < timer_period));
2049 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
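/* The enclosing j < 2 loop drains each queue twice, with the 10 ms
 * pause in between, so packets still in flight after the first pass
 * are flushed by the second.
 */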
2054 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2056 struct fwd_stream **fsm;
2059 #ifdef RTE_LIBRTE_BITRATE
2060 uint64_t tics_per_1sec;
2061 uint64_t tics_datum;
2062 uint64_t tics_current;
2063 uint16_t i, cnt_ports;
2065 cnt_ports = nb_ports;
2066 tics_datum = rte_rdtsc();
2067 tics_per_1sec = rte_get_timer_hz();
2069 fsm = &fwd_streams[fc->stream_idx];
2070 nb_fs = fc->stream_nb;
2072 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2073 (*pkt_fwd)(fsm[sm_id]);
2074 #ifdef RTE_LIBRTE_BITRATE
2075 if (bitrate_enabled != 0 &&
2076 bitrate_lcore_id == rte_lcore_id()) {
2077 tics_current = rte_rdtsc();
2078 if (tics_current - tics_datum >= tics_per_1sec) {
2079 /* Periodic bitrate calculation */
2080 for (i = 0; i < cnt_ports; i++)
2081 rte_stats_bitrate_calc(bitrate_data,
2083 tics_datum = tics_current;
2087 #ifdef RTE_LIBRTE_LATENCY_STATS
2088 if (latencystats_enabled != 0 &&
2089 latencystats_lcore_id == rte_lcore_id())
2090 rte_latencystats_update();
2093 } while (! fc->stopped);
2097 start_pkt_forward_on_core(void *fwd_arg)
2099 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2100 cur_fwd_config.fwd_eng->packet_fwd);
2105 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2106 * Used to start communication flows in network loopback test configurations.
2109 run_one_txonly_burst_on_core(void *fwd_arg)
2111 struct fwd_lcore *fwd_lc;
2112 struct fwd_lcore tmp_lcore;
2114 fwd_lc = (struct fwd_lcore *) fwd_arg;
2115 tmp_lcore = *fwd_lc;
2116 tmp_lcore.stopped = 1;
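/* With stopped already set, the do-while loop in run_pkt_fwd_on_lcore()
 * executes exactly one iteration, i.e. the engine sends a single burst
 * and returns.
 */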
2117 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2122 * Launch packet forwarding:
2123 * - Setup per-port forwarding context.
2124 * - Launch logical cores with their forwarding configuration.
2127 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2129 port_fwd_begin_t port_fwd_begin;
2134 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2135 if (port_fwd_begin != NULL) {
2136 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2137 (*port_fwd_begin)(fwd_ports_ids[i]);
2139 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2140 lc_id = fwd_lcores_cpuids[i];
2141 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2142 fwd_lcores[i]->stopped = 0;
2143 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2144 fwd_lcores[i], lc_id);
2146 printf("launch lcore %u failed - diag=%d\n",
2153 * Launch packet forwarding configuration.
2156 start_packet_forwarding(int with_tx_first)
2158 port_fwd_begin_t port_fwd_begin;
2159 port_fwd_end_t port_fwd_end;
2160 struct rte_port *port;
2164 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2165 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
2167 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2168 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
2170 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2171 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2172 (!nb_rxq || !nb_txq))
2173 rte_exit(EXIT_FAILURE,
2174 "Either rxq or txq is 0, cannot use %s fwd mode\n",
2175 cur_fwd_eng->fwd_mode_name);
2177 if (all_ports_started() == 0) {
2178 printf("Not all ports were started\n");
2181 if (test_done == 0) {
2182 printf("Packet forwarding already started\n");
2188 for (i = 0; i < nb_fwd_ports; i++) {
2189 pt_id = fwd_ports_ids[i];
2190 port = &ports[pt_id];
2191 if (!port->dcb_flag) {
2192 printf("In DCB mode, all forwarding ports must "
2193 "be configured in this mode.\n");
2197 if (nb_fwd_lcores == 1) {
2198 printf("In DCB mode, the number of forwarding cores "
2199 "should be larger than 1.\n");
2208 flush_fwd_rx_queues();
2210 pkt_fwd_config_display(&cur_fwd_config);
2211 rxtx_config_display();
2214 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2215 pt_id = fwd_ports_ids[i];
2216 port = &ports[pt_id];
2217 map_port_queue_stats_mapping_registers(pt_id, port);
2219 if (with_tx_first) {
2220 port_fwd_begin = tx_only_engine.port_fwd_begin;
2221 if (port_fwd_begin != NULL) {
2222 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2223 (*port_fwd_begin)(fwd_ports_ids[i]);
2225 while (with_tx_first--) {
2226 launch_packet_forwarding(
2227 run_one_txonly_burst_on_core);
2228 rte_eal_mp_wait_lcore();
2230 port_fwd_end = tx_only_engine.port_fwd_end;
2231 if (port_fwd_end != NULL) {
2232 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2233 (*port_fwd_end)(fwd_ports_ids[i]);
2236 launch_packet_forwarding(start_pkt_forward_on_core);
2240 stop_packet_forwarding(void)
2242 port_fwd_end_t port_fwd_end;
2248 printf("Packet forwarding not started\n");
2251 printf("Telling cores to stop...");
2252 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2253 fwd_lcores[lc_id]->stopped = 1;
2254 printf("\nWaiting for lcores to finish...\n");
2255 rte_eal_mp_wait_lcore();
2256 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2257 if (port_fwd_end != NULL) {
2258 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2259 pt_id = fwd_ports_ids[i];
2260 (*port_fwd_end)(pt_id);
2264 fwd_stats_display();
2266 printf("\nDone.\n");
2271 dev_set_link_up(portid_t pid)
2273 if (rte_eth_dev_set_link_up(pid) < 0)
2274 printf("\nSet link up failed.\n");
2278 dev_set_link_down(portid_t pid)
2280 if (rte_eth_dev_set_link_down(pid) < 0)
2281 printf("\nSet link down failed.\n");
2285 all_ports_started(void)
2288 struct rte_port *port;
2290 RTE_ETH_FOREACH_DEV(pi) {
2292 /* Check if there is a port which is not started */
2293 if ((port->port_status != RTE_PORT_STARTED) &&
2294 (port->slave_flag == 0))
2298 /* All ports are started */
2303 port_is_stopped(portid_t port_id)
2305 struct rte_port *port = &ports[port_id];
2307 if ((port->port_status != RTE_PORT_STOPPED) &&
2308 (port->slave_flag == 0))
2314 all_ports_stopped(void)
2318 RTE_ETH_FOREACH_DEV(pi) {
2319 if (!port_is_stopped(pi))
2327 port_is_started(portid_t port_id)
2329 if (port_id_is_invalid(port_id, ENABLED_WARN))
2332 if (ports[port_id].port_status != RTE_PORT_STARTED)
2338 /* Configure the Rx and Tx hairpin queues for the selected port. */
2340 setup_hairpin_queues(portid_t pi)
2343 struct rte_eth_hairpin_conf hairpin_conf = {
2348 struct rte_port *port = &ports[pi];
2350 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2351 hairpin_conf.peers[0].port = pi;
2352 hairpin_conf.peers[0].queue = i + nb_rxq;
2353 diag = rte_eth_tx_hairpin_queue_setup
2354 (pi, qi, nb_txd, &hairpin_conf);
2359 /* Failed to set up TX hairpin queue, return */
2360 if (rte_atomic16_cmpset(&(port->port_status),
2362 RTE_PORT_STOPPED) == 0)
2363 printf("Port %d can not be set back "
2364 "to stopped\n", pi);
2365 printf("Failed to configure port %d hairpin "
2367 /* try to reconfigure queues next time */
2368 port->need_reconfig_queues = 1;
2371 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2372 hairpin_conf.peers[0].port = pi;
2373 hairpin_conf.peers[0].queue = i + nb_txq;
2374 diag = rte_eth_rx_hairpin_queue_setup
2375 (pi, qi, nb_rxd, &hairpin_conf);
2380 /* Failed to set up RX hairpin queue, return */
2381 if (rte_atomic16_cmpset(&(port->port_status),
2383 RTE_PORT_STOPPED) == 0)
2384 printf("Port %d can not be set back "
2385 "to stopped\n", pi);
2386 printf("Failed to configure port %d hairpin "
2388 /* try to reconfigure queues next time */
2389 port->need_reconfig_queues = 1;
2396 start_port(portid_t pid)
2398 int diag, need_check_link_status = -1;
2401 struct rte_port *port;
2402 struct rte_ether_addr mac_addr;
2403 struct rte_eth_hairpin_cap cap;
2405 if (port_id_is_invalid(pid, ENABLED_WARN))
2410 RTE_ETH_FOREACH_DEV(pi) {
2411 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2414 need_check_link_status = 0;
2416 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2417 RTE_PORT_HANDLING) == 0) {
2418 printf("Port %d is not stopped\n", pi);
2422 if (port->need_reconfig > 0) {
2423 port->need_reconfig = 0;
2425 if (flow_isolate_all) {
2426 int ret = port_flow_isolate(pi, 1);
2428 printf("Failed to apply isolated"
2429 " mode on port %d\n", pi);
2433 configure_rxtx_dump_callbacks(0);
2434 printf("Configuring Port %d (socket %u)\n", pi,
2436 if (nb_hairpinq > 0 &&
2437 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2438 printf("Port %d doesn't support hairpin "
2442 /* configure port */
2443 diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
2444 nb_txq + nb_hairpinq,
2447 if (rte_atomic16_cmpset(&(port->port_status),
2448 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2449 printf("Port %d can not be set back "
2450 "to stopped\n", pi);
2451 printf("Failed to configure port %d\n", pi);
2452 /* try to reconfigure port next time */
2453 port->need_reconfig = 1;
2457 if (port->need_reconfig_queues > 0) {
2458 port->need_reconfig_queues = 0;
2459 /* setup tx queues */
2460 for (qi = 0; qi < nb_txq; qi++) {
2461 if ((numa_support) &&
2462 (txring_numa[pi] != NUMA_NO_CONFIG))
2463 diag = rte_eth_tx_queue_setup(pi, qi,
2464 port->nb_tx_desc[qi],
2466 &(port->tx_conf[qi]));
2468 diag = rte_eth_tx_queue_setup(pi, qi,
2469 port->nb_tx_desc[qi],
2471 &(port->tx_conf[qi]));
2476 /* Failed to set up TX queue; revert status and return */
2477 if (rte_atomic16_cmpset(&(port->port_status),
2479 RTE_PORT_STOPPED) == 0)
2480 printf("Port %d can not be set back "
2481 "to stopped\n", pi);
2482 printf("Fail to configure port %d tx queues\n",
2484 /* try to reconfigure queues next time */
2485 port->need_reconfig_queues = 1;
2488 for (qi = 0; qi < nb_rxq; qi++) {
2489 /* setup rx queues */
2490 if ((numa_support) &&
2491 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2492 struct rte_mempool *mp =
2493 mbuf_pool_find(rxring_numa[pi]);
2495 printf("Failed to set up RX queue: "
2496 "no mempool allocated "
2497 "on socket %d\n",
2502 diag = rte_eth_rx_queue_setup(pi, qi,
2503 port->nb_rx_desc[qi],
2505 &(port->rx_conf[qi]),
2508 struct rte_mempool *mp =
2509 mbuf_pool_find(port->socket_id);
2511 printf("Failed to setup RX queue:"
2512 "No mempool allocation"
2513 " on the socket %d\n",
2517 diag = rte_eth_rx_queue_setup(pi, qi,
2518 port->nb_rx_desc[qi],
2520 &(port->rx_conf[qi]),
2526 /* Failed to set up RX queue; revert status and return */
2527 if (rte_atomic16_cmpset(&(port->port_status),
2529 RTE_PORT_STOPPED) == 0)
2530 printf("Port %d can not be set back "
2531 "to stopped\n", pi);
2532 printf("Fail to configure port %d rx queues\n",
2534 /* try to reconfigure queues next time */
2535 port->need_reconfig_queues = 1;
2538 /* setup hairpin queues */
2539 if (setup_hairpin_queues(pi) != 0)
2542 configure_rxtx_dump_callbacks(verbose_level);
2544 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2548 "Port %d: Failed to disable Ptype parsing\n",
2553 if (rte_eth_dev_start(pi) < 0) {
2554 printf("Fail to start port %d\n", pi);
2556 /* Failed to start the port; revert status and return */
2557 if (rte_atomic16_cmpset(&(port->port_status),
2558 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2559 printf("Port %d can not be set back to "
2564 if (rte_atomic16_cmpset(&(port->port_status),
2565 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2566 printf("Port %d can not be set into started\n", pi);
2568 if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2569 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2570 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2571 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2572 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2574 /* at least one port started: link status must be checked */
2575 need_check_link_status = 1;
2578 if (need_check_link_status == 1 && !no_link_check)
2579 check_all_ports_link_status(RTE_PORT_ALL);
2580 else if (need_check_link_status == 0)
2581 printf("Please stop the ports first\n");
2588 stop_port(portid_t pid)
2591 struct rte_port *port;
2592 int need_check_link_status = 0;
2599 if (port_id_is_invalid(pid, ENABLED_WARN))
2602 printf("Stopping ports...\n");
2604 RTE_ETH_FOREACH_DEV(pi) {
2605 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2608 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2609 printf("Please remove port %d from forwarding configuration.\n", pi);
2613 if (port_is_bonding_slave(pi)) {
2614 printf("Please remove port %d from bonded device.\n", pi);
2619 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2620 RTE_PORT_HANDLING) == 0)
2623 rte_eth_dev_stop(pi);
2625 if (rte_atomic16_cmpset(&(port->port_status),
2626 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2627 printf("Port %d can not be set into stopped\n", pi);
2628 need_check_link_status = 1;
2630 if (need_check_link_status && !no_link_check)
2631 check_all_ports_link_status(RTE_PORT_ALL);
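/*
 * Sketch of the compare-and-set state machine used by stop_port()
 * above: the status word travels STARTED -> HANDLING -> STOPPED, so
 * two concurrent commands cannot stop the same port twice
 * (illustrative only; port and pi as in the function above).
 *
 *	if (rte_atomic16_cmpset(&port->port_status,
 *			RTE_PORT_STARTED, RTE_PORT_HANDLING) == 0)
 *		return;		// another context owns the transition
 *	rte_eth_dev_stop(pi);
 *	rte_atomic16_cmpset(&port->port_status,
 *			RTE_PORT_HANDLING, RTE_PORT_STOPPED);
 */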
2637 remove_invalid_ports_in(portid_t *array, portid_t *total)
2640 portid_t new_total = 0;
2642 for (i = 0; i < *total; i++)
2643 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2644 array[new_total] = array[i];
2651 remove_invalid_ports(void)
2653 remove_invalid_ports_in(ports_ids, &nb_ports);
2654 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2655 nb_cfg_ports = nb_fwd_ports;
2659 close_port(portid_t pid)
2662 struct rte_port *port;
2664 if (port_id_is_invalid(pid, ENABLED_WARN))
2667 printf("Closing ports...\n");
2669 RTE_ETH_FOREACH_DEV(pi) {
2670 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2673 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2674 printf("Please remove port %d from forwarding configuration.\n", pi);
2678 if (port_is_bonding_slave(pi)) {
2679 printf("Please remove port %d from bonded device.\n", pi);
2684 if (rte_atomic16_cmpset(&(port->port_status),
2685 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2686 printf("Port %d is already closed\n", pi);
2690 if (rte_atomic16_cmpset(&(port->port_status),
2691 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2692 printf("Port %d is now not stopped\n", pi);
2696 if (port->flow_list)
2697 port_flow_flush(pi);
2698 rte_eth_dev_close(pi);
2700 remove_invalid_ports();
2702 if (rte_atomic16_cmpset(&(port->port_status),
2703 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2704 printf("Port %d cannot be set to closed\n", pi);
2711 reset_port(portid_t pid)
2715 struct rte_port *port;
2717 if (port_id_is_invalid(pid, ENABLED_WARN))
2720 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2721 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2722 printf("Can not reset port(s), please stop port(s) first.\n");
2726 printf("Resetting ports...\n");
2728 RTE_ETH_FOREACH_DEV(pi) {
2729 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2732 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2733 printf("Please remove port %d from forwarding "
2734 "configuration.\n", pi);
2738 if (port_is_bonding_slave(pi)) {
2739 printf("Please remove port %d from bonded device.\n",
2744 diag = rte_eth_dev_reset(pi);
2747 port->need_reconfig = 1;
2748 port->need_reconfig_queues = 1;
2750 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2758 attach_port(char *identifier)
2761 struct rte_dev_iterator iterator;
2763 printf("Attaching a new port...\n");
2765 if (identifier == NULL) {
2766 printf("Invalid parameters are specified\n");
2770 if (rte_dev_probe(identifier) < 0) {
2771 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2775 /* first attach mode: event */
2776 if (setup_on_probe_event) {
2777 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2778 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2779 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2780 ports[pi].need_setup != 0)
2781 setup_attached_port(pi);
2785 /* second attach mode: iterator */
2786 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2787 /* setup ports matching the devargs used for probing */
2788 if (port_is_forwarding(pi))
2789 continue; /* port was already attached before */
2790 setup_attached_port(pi);
2795 setup_attached_port(portid_t pi)
2797 unsigned int socket_id;
2800 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2801 /* if socket_id is invalid, set to the first available socket. */
2802 if (check_socket_id(socket_id) < 0)
2803 socket_id = socket_ids[0];
2804 reconfig(pi, socket_id);
2805 ret = rte_eth_promiscuous_enable(pi);
2807 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
2808 pi, rte_strerror(-ret));
2810 ports_ids[nb_ports++] = pi;
2811 fwd_ports_ids[nb_fwd_ports++] = pi;
2812 nb_cfg_ports = nb_fwd_ports;
2813 ports[pi].need_setup = 0;
2814 ports[pi].port_status = RTE_PORT_STOPPED;
2816 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2821 detach_device(struct rte_device *dev)
2826 printf("Device already removed\n");
2830 printf("Removing a device...\n");
2832 if (rte_dev_remove(dev) < 0) {
2833 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2836 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2837 /* reset mapping between old ports and removed device */
2838 rte_eth_devices[sibling].device = NULL;
2839 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2840 /* sibling ports are forced to be closed */
2841 ports[sibling].port_status = RTE_PORT_CLOSED;
2842 printf("Port %u is closed\n", sibling);
2846 remove_invalid_ports();
2848 printf("Device is detached\n");
2849 printf("Now total ports is %d\n", nb_ports);
2855 detach_port_device(portid_t port_id)
2857 if (port_id_is_invalid(port_id, ENABLED_WARN))
2860 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2861 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2862 printf("Port not stopped\n");
2865 printf("Port was not closed\n");
2866 if (ports[port_id].flow_list)
2867 port_flow_flush(port_id);
2870 detach_device(rte_eth_devices[port_id].device);
2874 detach_devargs(char *identifier)
2876 struct rte_dev_iterator iterator;
2877 struct rte_devargs da;
2880 printf("Removing a device...\n");
2882 memset(&da, 0, sizeof(da));
2883 if (rte_devargs_parsef(&da, "%s", identifier)) {
2884 printf("cannot parse identifier\n");
2890 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
2891 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2892 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2893 printf("Port %u not stopped\n", port_id);
2894 rte_eth_iterator_cleanup(&iterator);
2898 /* matched ports are forced to be closed */
2899 if (ports[port_id].flow_list)
2900 port_flow_flush(port_id);
2901 ports[port_id].port_status = RTE_PORT_CLOSED;
2902 printf("Port %u is now closed\n", port_id);
2906 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
2907 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
2908 da.name, da.bus->name);
2912 remove_invalid_ports();
2914 printf("Device %s is detached\n", identifier);
2915 printf("Now total ports is %d\n", nb_ports);
2927 stop_packet_forwarding();
2929 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2931 if (mp_alloc_type == MP_ALLOC_ANON)
2932 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
2936 if (ports != NULL) {
2938 RTE_ETH_FOREACH_DEV(pt_id) {
2939 printf("\nStopping port %d...\n", pt_id);
2943 RTE_ETH_FOREACH_DEV(pt_id) {
2944 printf("\nShutting down port %d...\n", pt_id);
2951 ret = rte_dev_event_monitor_stop();
2954 "fail to stop device event monitor.");
2958 ret = rte_dev_event_callback_unregister(NULL,
2959 dev_event_callback, NULL);
2962 "fail to unregister device event callback.\n");
2966 ret = rte_dev_hotplug_handle_disable();
2969 "fail to disable hotplug handling.\n");
2973 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2975 rte_mempool_free(mempools[i]);
2978 printf("\nBye...\n");
2981 typedef void (*cmd_func_t)(void);
2982 struct pmd_test_command {
2983 const char *cmd_name;
2984 cmd_func_t cmd_func;
2987 /* Poll the link status of all ports for up to 9 s, then print the final state */
2989 check_all_ports_link_status(uint32_t port_mask)
2991 #define CHECK_INTERVAL 100 /* 100ms */
2992 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2994 uint8_t count, all_ports_up, print_flag = 0;
2995 struct rte_eth_link link;
2998 printf("Checking link statuses...\n");
3000 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3002 RTE_ETH_FOREACH_DEV(portid) {
3003 if ((port_mask & (1 << portid)) == 0)
3005 memset(&link, 0, sizeof(link));
3006 ret = rte_eth_link_get_nowait(portid, &link);
3009 if (print_flag == 1)
3010 printf("Port %u link get failed: %s\n",
3011 portid, rte_strerror(-ret));
3014 /* print link status if flag set */
3015 if (print_flag == 1) {
3016 if (link.link_status)
3018 "Port%d Link Up. speed %u Mbps- %s\n",
3019 portid, link.link_speed,
3020 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
3021 ("full-duplex") : ("half-duplex"));
3023 printf("Port %d Link Down\n", portid);
3026 /* clear all_ports_up flag if any link down */
3027 if (link.link_status == ETH_LINK_DOWN) {
3032 /* once the final link status has been printed, stop polling */
3033 if (print_flag == 1)
3036 if (all_ports_up == 0) {
3038 rte_delay_ms(CHECK_INTERVAL);
3041 /* set the print_flag if all ports up or timeout */
3042 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
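/*
 * The function above is an instance of a common poll-with-timeout
 * pattern; a reduced sketch for a single port (CHECK_INTERVAL and
 * MAX_CHECK_TIME as defined above, port_id is an assumption):
 *
 *	struct rte_eth_link link;
 *	uint8_t n;
 *
 *	for (n = 0; n < MAX_CHECK_TIME; n++) {
 *		memset(&link, 0, sizeof(link));
 *		if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *		    link.link_status == ETH_LINK_UP)
 *			break;			// link came up
 *		rte_delay_ms(CHECK_INTERVAL);	// wait 100 ms and retry
 *	}
 */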
3052 * This callback removes a single port of a device; it does not
3053 * handle removing several ports of the same device at once.
3054 * TODO: the device-detach invocation is planned to move from the user
3055 * side into the EAL, with all PMDs freeing port resources on ethdev close.
3058 rmv_port_callback(void *arg)
3060 int need_to_start = 0;
3061 int org_no_link_check = no_link_check;
3062 portid_t port_id = (intptr_t)arg;
3063 struct rte_device *dev;
3065 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3067 if (!test_done && port_is_forwarding(port_id)) {
3069 stop_packet_forwarding();
3073 no_link_check = org_no_link_check;
3075 /* Save rte_device pointer before closing ethdev port */
3076 dev = rte_eth_devices[port_id].device;
3077 close_port(port_id);
3078 detach_device(dev); /* might be already removed or have more ports */
3081 start_packet_forwarding(0);
3084 /* This function is used by the interrupt thread */
3086 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3089 RTE_SET_USED(param);
3090 RTE_SET_USED(ret_param);
3092 if (type >= RTE_ETH_EVENT_MAX) {
3093 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3094 port_id, __func__, type);
3096 } else if (event_print_mask & (UINT32_C(1) << type)) {
3097 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3098 eth_event_desc[type]);
3103 case RTE_ETH_EVENT_NEW:
3104 ports[port_id].need_setup = 1;
3105 ports[port_id].port_status = RTE_PORT_HANDLING;
3107 case RTE_ETH_EVENT_INTR_RMV:
3108 if (port_id_is_invalid(port_id, DISABLED_WARN))
3110 if (rte_eal_alarm_set(100000,
3111 rmv_port_callback, (void *)(intptr_t)port_id))
3112 fprintf(stderr, "Could not set up deferred device removal\n");
3121 register_eth_event_callback(void)
3124 enum rte_eth_event_type event;
3126 for (event = RTE_ETH_EVENT_UNKNOWN;
3127 event < RTE_ETH_EVENT_MAX; event++) {
3128 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3133 TESTPMD_LOG(ERR, "Failed to register callback for "
3134 "%s event\n", eth_event_desc[event]);
3142 /* This function is used by the interrupt thread */
3144 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3145 __rte_unused void *arg)
3150 if (type >= RTE_DEV_EVENT_MAX) {
3151 fprintf(stderr, "%s called upon invalid event %d\n",
3157 case RTE_DEV_EVENT_REMOVE:
3158 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3160 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3162 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
3167 * Because the user's callback is invoked from the EAL interrupt
3168 * callback, the interrupt callback must finish before the user's
3169 * callback can be unregistered while the device is being detached.
3170 * So return from the callback quickly and perform the detach through
3171 * a deferred removal. This is a workaround: once device detaching is
3172 * moved into the EAL, the deferred removal can be dropped.
3175 if (rte_eal_alarm_set(100000,
3176 rmv_port_callback, (void *)(intptr_t)port_id))
3178 "Could not set up deferred device removal\n");
3180 case RTE_DEV_EVENT_ADD:
3181 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3183 /* TODO: after kernel driver binding completes,
3184 * begin to attach the port.
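/*
 * Sketch of the deferred-removal idiom described above: schedule the
 * heavy detach work on an EAL alarm (100 ms here, matching the code
 * above) so the interrupt callback returns quickly. deferred_remove
 * is a hypothetical helper.
 *
 *	static void
 *	deferred_remove(void *arg)
 *	{
 *		portid_t pid = (intptr_t)arg;
 *
 *		// close and detach pid outside interrupt context
 *	}
 *
 *	rte_eal_alarm_set(100000, deferred_remove,
 *			(void *)(intptr_t)port_id);
 */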
3193 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3197 uint8_t mapping_found = 0;
3199 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3200 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3201 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
3202 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
3203 tx_queue_stats_mappings[i].queue_id,
3204 tx_queue_stats_mappings[i].stats_counter_id);
3211 port->tx_queue_stats_mapping_enabled = 1;
3216 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3220 uint8_t mapping_found = 0;
3222 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3223 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3224 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
3225 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
3226 rx_queue_stats_mappings[i].queue_id,
3227 rx_queue_stats_mappings[i].stats_counter_id);
3234 port->rx_queue_stats_mapping_enabled = 1;
3239 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
3243 diag = set_tx_queue_stats_mapping_registers(pi, port);
3245 if (diag == -ENOTSUP) {
3246 port->tx_queue_stats_mapping_enabled = 0;
3247 printf("TX queue stats mapping not supported port id=%d\n", pi);
3250 rte_exit(EXIT_FAILURE,
3251 "set_tx_queue_stats_mapping_registers "
3252 "failed for port id=%d diag=%d\n",
3256 diag = set_rx_queue_stats_mapping_registers(pi, port);
3258 if (diag == -ENOTSUP) {
3259 port->rx_queue_stats_mapping_enabled = 0;
3260 printf("RX queue stats mapping not supported port id=%d\n", pi);
3263 rte_exit(EXIT_FAILURE,
3264 "set_rx_queue_stats_mapping_registers "
3265 "failed for port id=%d diag=%d\n",
3271 rxtx_port_config(struct rte_port *port)
3276 for (qid = 0; qid < nb_rxq; qid++) {
3277 offloads = port->rx_conf[qid].offloads;
3278 port->rx_conf[qid] = port->dev_info.default_rxconf;
3280 port->rx_conf[qid].offloads = offloads;
3282 /* Check if any Rx parameters have been passed */
3283 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3284 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3286 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3287 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3289 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3290 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3292 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3293 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3295 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3296 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3298 port->nb_rx_desc[qid] = nb_rxd;
3301 for (qid = 0; qid < nb_txq; qid++) {
3302 offloads = port->tx_conf[qid].offloads;
3303 port->tx_conf[qid] = port->dev_info.default_txconf;
3305 port->tx_conf[qid].offloads = offloads;
3307 /* Check if any Tx parameters have been passed */
3308 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3309 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3311 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3312 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3314 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3315 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3317 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3318 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3320 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3321 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3323 port->nb_tx_desc[qid] = nb_txd;
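/*
 * Sketch of the override pattern used above: start from the driver's
 * default_rxconf, change only the user-supplied knobs, and pass the
 * result to queue setup (the threshold values, mp and nb_rxd are
 * assumptions of the example):
 *
 *	struct rte_eth_rxconf rxc = dev_info.default_rxconf;
 *
 *	rxc.rx_thresh.pthresh = 8;	// prefetch threshold
 *	rxc.rx_free_thresh = 32;	// free descriptors in batches of 32
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *			rte_eth_dev_socket_id(port_id), &rxc, mp);
 */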
3328 init_port_config(void)
3331 struct rte_port *port;
3334 RTE_ETH_FOREACH_DEV(pid) {
3336 port->dev_conf.fdir_conf = fdir_conf;
3338 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3343 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3344 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3345 rss_hf & port->dev_info.flow_type_rss_offloads;
3347 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3348 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3351 if (port->dcb_flag == 0) {
3352 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3353 port->dev_conf.rxmode.mq_mode =
3354 (enum rte_eth_rx_mq_mode)
3355 (rx_mq_mode & ETH_MQ_RX_RSS);
3357 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3360 rxtx_port_config(port);
3362 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3366 map_port_queue_stats_mapping_registers(pid, port);
3367 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3368 rte_pmd_ixgbe_bypass_init(pid);
3371 if (lsc_interrupt &&
3372 (rte_eth_devices[pid].data->dev_flags &
3373 RTE_ETH_DEV_INTR_LSC))
3374 port->dev_conf.intr_conf.lsc = 1;
3375 if (rmv_interrupt &&
3376 (rte_eth_devices[pid].data->dev_flags &
3377 RTE_ETH_DEV_INTR_RMV))
3378 port->dev_conf.intr_conf.rmv = 1;
3382 void set_port_slave_flag(portid_t slave_pid)
3384 struct rte_port *port;
3386 port = &ports[slave_pid];
3387 port->slave_flag = 1;
3390 void clear_port_slave_flag(portid_t slave_pid)
3392 struct rte_port *port;
3394 port = &ports[slave_pid];
3395 port->slave_flag = 0;
3398 uint8_t port_is_bonding_slave(portid_t slave_pid)
3400 struct rte_port *port;
3402 port = &ports[slave_pid];
3403 if ((rte_eth_devices[slave_pid].data->dev_flags &
3404 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3409 const uint16_t vlan_tags[] = {
3410 0, 1, 2, 3, 4, 5, 6, 7,
3411 8, 9, 10, 11, 12, 13, 14, 15,
3412 16, 17, 18, 19, 20, 21, 22, 23,
3413 24, 25, 26, 27, 28, 29, 30, 31
3417 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3418 enum dcb_mode_enable dcb_mode,
3419 enum rte_eth_nb_tcs num_tcs,
3424 struct rte_eth_rss_conf rss_conf;
3427 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3428 * given above, and the number of traffic classes available for use.
3430 if (dcb_mode == DCB_VT_ENABLED) {
3431 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3432 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3433 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3434 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3436 /* VMDQ+DCB RX and TX configurations */
3437 vmdq_rx_conf->enable_default_pool = 0;
3438 vmdq_rx_conf->default_pool = 0;
3439 vmdq_rx_conf->nb_queue_pools =
3440 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3441 vmdq_tx_conf->nb_queue_pools =
3442 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3444 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3445 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3446 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3447 vmdq_rx_conf->pool_map[i].pools =
3448 1 << (i % vmdq_rx_conf->nb_queue_pools);
3450 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3451 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3452 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3455 /* set DCB mode of RX and TX of multiple queues */
3456 eth_conf->rxmode.mq_mode =
3457 (enum rte_eth_rx_mq_mode)
3458 (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3459 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3461 struct rte_eth_dcb_rx_conf *rx_conf =
3462 &eth_conf->rx_adv_conf.dcb_rx_conf;
3463 struct rte_eth_dcb_tx_conf *tx_conf =
3464 &eth_conf->tx_adv_conf.dcb_tx_conf;
3466 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3468 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3472 rx_conf->nb_tcs = num_tcs;
3473 tx_conf->nb_tcs = num_tcs;
3475 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3476 rx_conf->dcb_tc[i] = i % num_tcs;
3477 tx_conf->dcb_tc[i] = i % num_tcs;
3480 eth_conf->rxmode.mq_mode =
3481 (enum rte_eth_rx_mq_mode)
3482 (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3483 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3484 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3488 eth_conf->dcb_capability_en =
3489 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3491 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
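/*
 * Sketch of applying the configuration built by get_eth_dcb_conf()
 * to a port (illustrative; plain DCB with 4 traffic classes and PFC
 * disabled):
 *
 *	struct rte_eth_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	if (get_eth_dcb_conf(pid, &conf, DCB_ENABLED, ETH_4_TCS, 0) != 0)
 *		return;
 *	if (rte_eth_dev_configure(pid, nb_rxq, nb_txq, &conf) < 0)
 *		return;
 */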
3497 init_port_dcb_config(portid_t pid,
3498 enum dcb_mode_enable dcb_mode,
3499 enum rte_eth_nb_tcs num_tcs,
3502 struct rte_eth_conf port_conf;
3503 struct rte_port *rte_port;
3507 rte_port = &ports[pid];
3509 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3510 /* Enter DCB configuration status */
3513 port_conf.rxmode = rte_port->dev_conf.rxmode;
3514 port_conf.txmode = rte_port->dev_conf.txmode;
3516 /* set the DCB configuration for both VT and non-VT modes */
3517 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3520 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3522 /* re-configure the device */
3523 retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
3527 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3531 /* If dev_info.vmdq_pool_base is greater than 0,
3532 * the queue ids of the VMDq pools start after the PF queues.
3534 if (dcb_mode == DCB_VT_ENABLED &&
3535 rte_port->dev_info.vmdq_pool_base > 0) {
3536 printf("VMDQ_DCB multi-queue mode is nonsensical"
3537 " for port %d.", pid);
3541 /* Assume the ports in testpmd have the same DCB capability
3542 * and the same number of rxq and txq in DCB mode
3544 if (dcb_mode == DCB_VT_ENABLED) {
3545 if (rte_port->dev_info.max_vfs > 0) {
3546 nb_rxq = rte_port->dev_info.nb_rx_queues;
3547 nb_txq = rte_port->dev_info.nb_tx_queues;
3549 nb_rxq = rte_port->dev_info.max_rx_queues;
3550 nb_txq = rte_port->dev_info.max_tx_queues;
3553 /* if VT is disabled, use all PF queues */
3554 if (rte_port->dev_info.vmdq_pool_base == 0) {
3555 nb_rxq = rte_port->dev_info.max_rx_queues;
3556 nb_txq = rte_port->dev_info.max_tx_queues;
3558 nb_rxq = (queueid_t)num_tcs;
3559 nb_txq = (queueid_t)num_tcs;
3563 rx_free_thresh = 64;
3565 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3567 rxtx_port_config(rte_port);
3569 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3570 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3571 rx_vft_set(pid, vlan_tags[i], 1);
3573 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3577 map_port_queue_stats_mapping_registers(pid, rte_port);
3579 rte_port->dcb_flag = 1;
3587 /* Configuration of Ethernet ports. */
3588 ports = rte_zmalloc("testpmd: ports",
3589 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3590 RTE_CACHE_LINE_SIZE);
3591 if (ports == NULL) {
3592 rte_exit(EXIT_FAILURE,
3593 "rte_zmalloc(%d struct rte_port) failed\n",
3597 /* Initialize ports NUMA structures */
3598 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3599 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3600 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3614 const char clr[] = { 27, '[', '2', 'J', '\0' };
3615 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3617 /* Clear screen and move to top left */
3618 printf("%s%s", clr, top_left);
3620 printf("\nPort statistics ====================================");
3621 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3622 nic_stats_display(fwd_ports_ids[i]);
3628 signal_handler(int signum)
3630 if (signum == SIGINT || signum == SIGTERM) {
3631 printf("\nSignal %d received, preparing to exit...\n",
3633 #ifdef RTE_LIBRTE_PDUMP
3634 /* uninitialize packet capture framework */
3637 #ifdef RTE_LIBRTE_LATENCY_STATS
3638 if (latencystats_enabled != 0)
3639 rte_latencystats_uninit();
3642 /* Set a flag to indicate forced termination. */
3644 /* exit with the expected status */
3645 signal(signum, SIG_DFL);
3646 kill(getpid(), signum);
3651 main(int argc, char** argv)
3658 signal(SIGINT, signal_handler);
3659 signal(SIGTERM, signal_handler);
3661 testpmd_logtype = rte_log_register("testpmd");
3662 if (testpmd_logtype < 0)
3663 rte_exit(EXIT_FAILURE, "Cannot register log type");
3664 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3666 diag = rte_eal_init(argc, argv);
3668 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3669 rte_strerror(rte_errno));
3671 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3672 rte_exit(EXIT_FAILURE,
3673 "Secondary process type not supported.\n");
3675 ret = register_eth_event_callback();
3677 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3679 #ifdef RTE_LIBRTE_PDUMP
3680 /* initialize packet capture framework */
3685 RTE_ETH_FOREACH_DEV(port_id) {
3686 ports_ids[count] = port_id;
3689 nb_ports = (portid_t) count;
3691 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3693 /* allocate port structures, and init them */
3696 set_def_fwd_config();
3698 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3699 "Check the core mask argument\n");
3701 /* Bitrate/latency stats disabled by default */
3702 #ifdef RTE_LIBRTE_BITRATE
3703 bitrate_enabled = 0;
3705 #ifdef RTE_LIBRTE_LATENCY_STATS
3706 latencystats_enabled = 0;
3709 /* on FreeBSD, mlockall() is disabled by default */
3710 #ifdef RTE_EXEC_ENV_FREEBSD
3719 launch_args_parse(argc, argv);
3721 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3722 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3726 if (tx_first && interactive)
3727 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3728 "interactive mode.\n");
3730 if (tx_first && lsc_interrupt) {
3731 printf("Warning: lsc_interrupt needs to be off when "
3732 " using tx_first. Disabling.\n");
3736 if (!nb_rxq && !nb_txq)
3737 printf("Warning: Either rx or tx queues should be non-zero\n");
3739 if (nb_rxq > 1 && nb_rxq > nb_txq)
3740 printf("Warning: nb_rxq=%d enables RSS configuration, "
3741 "but nb_txq=%d will prevent to fully test it.\n",
3747 ret = rte_dev_hotplug_handle_enable();
3750 "fail to enable hotplug handling.");
3754 ret = rte_dev_event_monitor_start();
3757 "fail to start device event monitoring.");
3761 ret = rte_dev_event_callback_register(NULL,
3762 dev_event_callback, NULL);
3765 "fail to register device event callback\n");
3770 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3771 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3773 /* set all ports to promiscuous mode by default */
3774 RTE_ETH_FOREACH_DEV(port_id) {
3775 ret = rte_eth_promiscuous_enable(port_id);
3777 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3778 port_id, rte_strerror(-ret));
3781 /* Init metrics library */
3782 rte_metrics_init(rte_socket_id());
3784 #ifdef RTE_LIBRTE_LATENCY_STATS
3785 if (latencystats_enabled != 0) {
3786 int ret = rte_latencystats_init(1, NULL);
3788 printf("Warning: latencystats init()"
3789 " returned error %d\n", ret);
3790 printf("Latencystats running on lcore %d\n",
3791 latencystats_lcore_id);
3795 /* Setup bitrate stats */
3796 #ifdef RTE_LIBRTE_BITRATE
3797 if (bitrate_enabled != 0) {
3798 bitrate_data = rte_stats_bitrate_create();
3799 if (bitrate_data == NULL)
3800 rte_exit(EXIT_FAILURE,
3801 "Could not allocate bitrate data.\n");
3802 rte_stats_bitrate_reg(bitrate_data);
3806 #ifdef RTE_LIBRTE_CMDLINE
3807 if (strlen(cmdline_filename) != 0)
3808 cmdline_read_from_file(cmdline_filename);
3810 if (interactive == 1) {
3812 printf("Start automatic packet forwarding\n");
3813 start_packet_forwarding(0);
3825 printf("No commandline core given, start packet forwarding\n");
3826 start_packet_forwarding(tx_first);
3827 if (stats_period != 0) {
3828 uint64_t prev_time = 0, cur_time, diff_time = 0;
3829 uint64_t timer_period;
3831 /* Convert to number of cycles */
3832 timer_period = stats_period * rte_get_timer_hz();
3834 while (f_quit == 0) {
3835 cur_time = rte_get_timer_cycles();
3836 diff_time += cur_time - prev_time;
3838 if (diff_time >= timer_period) {
3840 /* Reset the timer */
3843 /* Sleep to avoid unnecessary checks */
3844 prev_time = cur_time;
3849 printf("Press enter to exit\n");
3850 rc = read(0, &c, 1);
3856 ret = rte_eal_cleanup();
3858 rte_exit(EXIT_FAILURE,
3859 "EAL cleanup failed: %s\n", strerror(-ret));
3861 return EXIT_SUCCESS;
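/*
 * Sketch of the cycle-based periodic timer used by the stats loop in
 * main() above (a 5 s period and print_stats() are assumptions of
 * the example):
 *
 *	uint64_t period = 5 * rte_get_timer_hz();	// seconds -> cycles
 *	uint64_t prev = rte_get_timer_cycles();
 *
 *	while (!f_quit) {
 *		uint64_t now = rte_get_timer_cycles();
 *
 *		if (now - prev >= period) {
 *			print_stats();	// hypothetical action
 *			prev = now;
 *		}
 *	}
 */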