/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif
#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif
#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;
/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
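/*
 * For illustration only (a sketch, assuming the MP_ALLOC_* enum from
 * testpmd.h): these values select the allocation paths implemented in
 * mbuf_pool_create() further below:
 *
 *	MP_ALLOC_NATIVE      -> rte_pktmbuf_pool_create()
 *	MP_ALLOC_ANON        -> rte_mempool_create_empty() +
 *	                        rte_mempool_populate_anon()
 *	MP_ALLOC_XMEM(_HUGE) -> pool placed on the external "extmem" heap
 *	                        prepared by setup_extmem()
 *	MP_ALLOC_XBUF        -> rte_pktmbuf_pool_create_extbuf() with the
 *	                        memzones reserved by setup_extbuf()
 */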
/*
 * Store the sockets on which the memory pools used by the ports
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the RX rings of the ports are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the TX rings of the ports are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
uint16_t mempool_flags;
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated from the console. Set a flag to exit the stats-period loop
 * after SIGINT/SIGTERM is received.
 */
static volatile uint8_t f_quit;
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */
/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
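/*
 * For illustration (a sketch of ethdev behavior, not code from this file):
 * a ring size of 0 is passed straight to rte_eth_rx_queue_setup() /
 * rte_eth_tx_queue_setup(), which then fall back to the driver's preferred
 * ring size, e.g.:
 *
 *	struct rte_eth_dev_info info;
 *	rte_eth_dev_info_get(port_id, &info);
 *	uint16_t rxd = nb_rxd ? nb_rxd : info.default_rxportconf.ring_size;
 */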
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour.
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;
/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};
/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
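/*
 * Illustrative sketch (assumed usage, not copied from this file): event
 * printing is typically gated on this mask before consulting
 * eth_event_desc[], e.g.:
 *
 *	if (event_print_mask & (UINT32_C(1) << type))
 *		printf("Port %u: %s event\n", port_id, eth_event_desc[type]);
 */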
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		/**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];
#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];

/*
 * Helper function to check if a socket was already discovered.
 * Returns a positive value if the socket id is new, zero if it was
 * already discovered.
 */
static int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc = 0;
	unsigned int sock_num;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}
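/*
 * For illustration of the loop above: RTE_ETHER_LOCAL_ADMIN_ADDR is 0x02, so
 * the default peer address of port N is the locally administered MAC
 * 02:00:00:00:00:<N>, e.g. 02:00:00:00:00:01 for port 1.
 */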
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
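/*
 * Worked example for calc_mem_size() with hypothetical numbers: suppose
 * rte_mempool_calc_obj_size() reports obj_sz = 2560 B, pgsz = 2 MiB and
 * nb_mbufs = 262144. Then mbuf_per_pg = 2097152 / 2560 = 819,
 * n_pages = ceil(262144 / 819) = 321, mbuf_mem = 321 * 2 MiB = 642 MiB,
 * and total_mem = RTE_ALIGN(128 MiB + 642 MiB, 2 MiB) = 770 MiB.
 */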
static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
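/*
 * For illustration: rte_log2_u64(RTE_PGSIZE_2M) == 21, so for 2 MiB pages
 * this returns (21 << MAP_HUGE_SHIFT), which is exactly Linux's MAP_HUGE_2MB
 * encoding; the fallback HUGE_SHIFT of 26 above matches MAP_HUGE_SHIFT.
 */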
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}
struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			      RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);
		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}

		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);
			iovas[cur_page] = iova;
		}
		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;

fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
					memhdr->len);
		if (ret)
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret)
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
				      memhdr->len);
		if (ret)
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
	}
}
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
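	/*
	 * Sizing example with hypothetical numbers: for mbuf_sz = 2176 the
	 * cache-line alignment is a no-op (2176 = 34 * 64), each 2 MiB zone
	 * then holds elt_num = 2097152 / 2176 = 963 buffers, and zone_num is
	 * the rounded-up quotient nb_mbufs / 963.
	 */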
	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
/*
 * Check whether a given socket id is valid in NUMA mode.
 * If valid, return 0; otherwise return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return max_rxq_valid ? allowed_max_rxq : 0;
}

/*
 * Check whether the input rxq is valid or not.
 * The input rxq is valid if it is not greater than the maximum number
 * of RX queues of every port.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq, allowed_max_rxq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return max_txq_valid ? allowed_max_txq : 0;
}

/*
 * Check whether the input txq is valid or not.
 * The input txq is valid if it is not greater than the maximum number
 * of TX queues of every port.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq, allowed_max_txq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RXDs of every RX queue.
 * *pid returns the port id which has the minimal value of
 * max_rxd among all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_rxd(portid_t *pid)
{
	uint16_t allowed_max_rxd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_rxd;
}

/*
 * Get the allowed minimal number of RXDs of every RX queue.
 * *pid returns the port id which has the highest value of
 * min_rxd among all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_rxd(portid_t *pid)
{
	uint16_t allowed_min_rxd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_rxd;
}

/*
 * Check whether the input rxd is valid or not.
 * The input rxd is valid if it is not greater than the maximum number
 * of RXDs of every RX queue and not less than the minimal number
 * of RXDs of every RX queue.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_rxd(queueid_t rxd)
{
	uint16_t allowed_max_rxd;
	uint16_t allowed_min_rxd;
	portid_t pid = 0;

	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
	if (rxd > allowed_max_rxd) {
		printf("Fail: input rxd (%u) can't be greater "
		       "than max_rxds (%u) of port %u\n",
		       rxd, allowed_max_rxd, pid);
		return -1;
	}

	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
	if (rxd < allowed_min_rxd) {
		printf("Fail: input rxd (%u) can't be less "
		       "than min_rxds (%u) of port %u\n",
		       rxd, allowed_min_rxd, pid);
		return -1;
	}

	return 0;
}
/*
 * Get the allowed maximum number of TXDs of every TX queue.
 * *pid returns the port id which has the minimal value of
 * max_txd among all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_txd(portid_t *pid)
{
	uint16_t allowed_max_txd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_txd;
}

/*
 * Get the allowed minimal number of TXDs of every TX queue.
 * *pid returns the port id which has the highest value of
 * min_txd among all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_txd(portid_t *pid)
{
	uint16_t allowed_min_txd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_txd;
}

/*
 * Check whether the input txd is valid or not.
 * The input txd is valid if it is not greater than the maximum number
 * of TXDs of every TX queue and not less than the minimal number
 * of TXDs of every TX queue.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_txd(queueid_t txd)
{
	uint16_t allowed_max_txd;
	uint16_t allowed_min_txd;
	portid_t pid = 0;

	allowed_max_txd = get_allowed_max_nb_txd(&pid);
	if (txd > allowed_max_txd) {
		printf("Fail: input txd (%u) can't be greater "
		       "than max_txds (%u) of port %u\n",
		       txd, allowed_max_txd, pid);
		return -1;
	}

	allowed_min_txd = get_allowed_min_nb_txd(&pid);
	if (txd < allowed_min_txd) {
		printf("Fail: input txd (%u) can't be less "
		       "than min_txds (%u) of port %u\n",
		       txd, allowed_min_txd, pid);
		return -1;
	}

	return 0;
}
/*
 * Get the allowed maximum number of hairpin queues.
 * *pid returns the port id which has the minimal value of
 * max_hairpin_queues among all ports.
 */
queueid_t
get_allowed_max_nb_hairpinq(portid_t *pid)
{
	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
	portid_t pi;
	struct rte_eth_hairpin_cap cap;

	RTE_ETH_FOREACH_DEV(pi) {
		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
			*pid = pi;
			return 0;
		}
		if (cap.max_nb_queues < allowed_max_hairpinq) {
			allowed_max_hairpinq = cap.max_nb_queues;
			*pid = pi;
		}
	}
	return allowed_max_hairpinq;
}

/*
 * Check whether the input number of hairpin queues is valid or not.
 * The input hairpinq is valid if it is not greater than the maximum
 * number of hairpin queues of every port.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_hairpinq(queueid_t hairpinq)
{
	queueid_t allowed_max_hairpinq;
	portid_t pid = 0;

	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
	if (hairpinq > allowed_max_hairpinq) {
		printf("Fail: input hairpin (%u) can't be greater "
		       "than max_hairpin_queues (%u) of port %u\n",
		       hairpinq, allowed_max_hairpinq, pid);
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	uint16_t data_size;
	bool warning = 0;
	int k;
	int ret;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_info_get() failed\n");

		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;

		/* Check for maximum number of segments per MTU. Accordingly
		 * update the mbuf data size.
		 */
		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
		    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = rx_mode.max_rx_pkt_len /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;

			if ((data_size + RTE_PKTMBUF_HEADROOM) >
							mbuf_data_size) {
				mbuf_data_size = data_size +
						 RTE_PKTMBUF_HEADROOM;
				warning = 1;
			}
		}
	}

	if (warning)
		TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
			    mbuf_data_size);

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mempools[i] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool,
						       socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mempools[0] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool, 0);
		else
			mempools[socket_num] = mbuf_pool_create
						(mbuf_data_size,
						 nb_mbuf_per_pool,
						 socket_num);
	}

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
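	/*
	 * That is, GSO may segment plain TCP as well as VXLAN- and
	 * GRE-tunnelled TCP payloads, plus UDP payloads, matching the
	 * DEV_TX_OFFLOAD_* flags combined above.
	 */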
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
				 "rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;
	int ret;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];

	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
	if (ret != 0)
		return;

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
	       " TX-dropped: %-14"PRIu64,
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
		       "  Rx- bad L4 checksum: %-14"PRIu64
		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);
	} else {
		printf("\n");
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles = 0;
#endif
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports)
			fwd_stream_stats_display(sm_id);

		ports_stats[fs->tx_port].tx_stream = fs;
		ports_stats[fs->rx_port].rx_stream = fs;

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles += fs->core_cycles;
#endif
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		uint8_t j;

		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		if (!port->rx_queue_stats_mapping_enabled &&
		    !port->tx_queue_stats_mapping_enabled) {
			printf("  RX-packets: %-14"PRIu64
			       " RX-dropped: %-14"PRIu64
			       "RX-total: %-"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum: %-14"PRIu64
				       " Bad-l4csum: %-14"PRIu64
				       "Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if (stats.ierrors + stats.rx_nombuf > 0) {
				printf("  RX-error: %-"PRIu64"\n",
				       stats.ierrors);
				printf("  RX-nombufs: %-14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %-14"PRIu64
			       " TX-dropped: %-14"PRIu64
			       "TX-total: %-"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		} else {
			printf("  RX-packets: %14"PRIu64
			       " RX-dropped:%14"PRIu64
			       " RX-total:%14"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum:%14"PRIu64
				       " Bad-l4csum:%14"PRIu64
				       " Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if ((stats.ierrors + stats.rx_nombuf) > 0) {
				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
				printf("  RX-nombufs: %14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %14"PRIu64
			       " TX-dropped:%14"PRIu64
			       " TX-total:%14"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		if (ports_stats[pt_id].rx_stream)
			pkt_burst_stats_display("RX",
				&ports_stats[pt_id].rx_stream->rx_burst_stats);
		if (ports_stats[pt_id].tx_stream)
			pkt_burst_stats_display("TX",
				&ports_stats[pt_id].tx_stream->tx_burst_stats);
#endif

		if (port->rx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d RX-packets:%14"PRIu64
				       " RX-errors:%14"PRIu64
				       " RX-bytes:%14"PRIu64"\n",
				       j, stats.q_ipackets[j],
				       stats.q_errors[j], stats.q_ibytes[j]);
			}
		}
		if (port->tx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d TX-packets:%14"PRIu64
				       " TX-bytes:%14"PRIu64"\n",
				       j, stats.q_opackets[j],
				       stats.q_obytes[j]);
			}
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
}
void
fwd_stats_reset(void)
{
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		fs->rx_packets = 0;
		fs->tx_packets = 0;
		fs->fwd_dropped = 0;
		fs->rx_bad_ip_csum = 0;
		fs->rx_bad_l4_csum = 0;
		fs->rx_bad_outer_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fs->core_cycles = 0;
#endif
	}
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can stall in the do-while loop below
				 * if rte_eth_rx_burst() always returns nonzero
				 * packets, so a timer is added to exit this
				 * loop after the 1-second timer expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
		    bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
		    latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
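/*
 * Note on the helper above: because the lcore context is copied with
 * stopped = 1, the do/while loop in run_pkt_fwd_on_lcore() exits after its
 * first pass, so each stream transmits exactly one burst.
 */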
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq are 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
				run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	port_fwd_end_t port_fwd_end;
	lcoreid_t lc_id;
	portid_t pt_id;
	int i;

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}

	fwd_stats_display();

	test_done = 1;
	printf("\nDone.\n");
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down failed.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* All ports are started. */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}
/* Configure the Rx and Tx hairpin queues for the selected port. */
static int
setup_hairpin_queues(portid_t pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];

	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = pi;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up the TX hairpin queue, return */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
			       "to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
		       "queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = pi;
		hairpin_conf.peers[0].queue = i + nb_txq;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up the RX hairpin queue, return */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
			       "to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
		       "queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_hairpin_cap cap;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			if (nb_hairpinq > 0 &&
			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
				printf("Port %d doesn't support hairpin "
				       "queues\n", pi);
				return -1;
			}
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
						     nb_txq + nb_hairpinq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Failed to set up the Tx queue; return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to set up RX queue: "
							"no mempool allocated "
							"on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to set up RX queue: "
							"no mempool allocated "
							"on socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Failed to set up the Rx queue; return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Failed to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup hairpin queues */
			if (setup_hairpin_queues(pi) != 0)
				return -1;
		}
		configure_rxtx_dump_callbacks(verbose_level);
		if (clear_ptypes) {
			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
					NULL, 0);
			if (diag < 0)
				printf(
				"Port %d: Failed to disable Ptype parsing\n",
				pi);
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);

			/* Failed to start the port; set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set to started\n", pi);

		if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
			printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
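/*
 * Note: every failure path in start_port() above re-arms need_reconfig or
 * need_reconfig_queues before returning, so the next "port start" attempt
 * redoes the device or queue setup instead of trusting a half-configured
 * port.
 */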
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set to stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
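/*
 * Compact a port-id array in place: keep only ids that are still valid
 * (e.g. after a hotplug removal) and shrink *total accordingly.
 */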
static void
remove_invalid_ports_in(portid_t *array, portid_t *total)
{
	portid_t i;
	portid_t new_total = 0;

	for (i = 0; i < *total; i++)
		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
			array[new_total] = array[i];
			new_total++;
		}
	*total = new_total;
}

static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		remove_invalid_ports();

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
		printf("Cannot reset port(s); please stop port(s) first.\n");
		return;
	}

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
		}
	}

	printf("Done\n");
}
void
attach_port(char *identifier)
{
	portid_t pi;
	struct rte_dev_iterator iterator;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_dev_probe(identifier) < 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
		return;
	}

	/* first attach mode: event */
	if (setup_on_probe_event) {
		/* new ports are detected on RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
					ports[pi].need_setup != 0)
				setup_attached_port(pi);
		return;
	}

	/* second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* setup ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
	}
}
static void
setup_attached_port(portid_t pi)
{
	unsigned int socket_id;
	int ret;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	ret = rte_eth_promiscuous_enable(pi);
	if (ret != 0)
		printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
			pi, rte_strerror(-ret));

	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached; total ports is now %d\n", pi, nb_ports);
	printf("Done\n");
}
static void
detach_device(struct rte_device *dev)
{
	portid_t sibling;

	if (dev == NULL) {
		printf("Device already removed\n");
		return;
	}

	printf("Removing a device...\n");

	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		/* reset mapping between old ports and removed device */
		rte_eth_devices[sibling].device = NULL;
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			/* sibling ports are forced to be closed */
			ports[sibling].port_status = RTE_PORT_CLOSED;
			printf("Port %u is closed\n", sibling);
		}
	}

	remove_invalid_ports();

	printf("Device is detached\n");
	printf("Total ports is now %d\n", nb_ports);
	printf("Done\n");
}
void
detach_port_device(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			printf("Port not stopped\n");
			return;
		}
		printf("Port was not closed\n");
		if (ports[port_id].flow_list)
			port_flow_flush(port_id);
	}

	detach_device(rte_eth_devices[port_id].device);
}
void
detach_devargs(char *identifier)
{
	struct rte_dev_iterator iterator;
	struct rte_devargs da;
	portid_t port_id;

	printf("Removing a device...\n");

	memset(&da, 0, sizeof(da));
	if (rte_devargs_parsef(&da, "%s", identifier)) {
		printf("cannot parse identifier\n");
		return;
	}

	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
				printf("Port %u not stopped\n", port_id);
				rte_eth_iterator_cleanup(&iterator);
				return;
			}

			/* sibling ports are forced to be closed */
			if (ports[port_id].flow_list)
				port_flow_flush(port_id);
			ports[port_id].port_status = RTE_PORT_CLOSED;
			printf("Port %u is now closed\n", port_id);
		}
	}

	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
			    da.name, da.bus->name);
		return;
	}

	remove_invalid_ports();

	printf("Device %s is detached\n", identifier);
	printf("Total ports is now %d\n", nb_ports);
	printf("Done\n");
}
void
pmd_test_exit(void)
{
	portid_t pt_id;
	unsigned int i;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
		if (mempools[i]) {
			if (mp_alloc_type == MP_ALLOC_ANON)
				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
						     NULL);
		}
	}
	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to stop device event monitor.");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"failed to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to disable hotplug handling.\n");
			return;
		}
	}
	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
		if (mempools[i])
			rte_mempool_free(mempools[i]);
	}

	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port %d Link Up. speed %u Mbps - %s\n",
					portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}
/*
 * This callback removes a port from a device. It is limited in that it
 * cannot handle multiple port removals for one device.
 * TODO: the device detach invocation is planned to move from the user side
 * into the EAL, with all PMDs converted to free their port resources when
 * the ether device is closed.
 */
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;
	struct rte_device *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;

	/* Save rte_device pointer before closing ethdev port */
	dev = rte_eth_devices[port_id].device;
	close_port(port_id);
	detach_device(dev); /* might be already removed or have more ports */

	if (need_to_start)
		start_packet_forwarding(0);
}
/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		   void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}
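/*
 * The 100000 us (100 ms) alarm above defers the actual detach out of the
 * interrupt thread; detaching inline would tear down the very callback that
 * is currently executing. See the longer comment in dev_event_callback()
 * below for the same workaround.
 */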
static int
register_eth_event_callback(void)
{
	int ret;
	enum rte_eth_event_type event;

	for (event = RTE_ETH_EVENT_UNKNOWN;
			event < RTE_ETH_EVENT_MAX; event++) {
		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
				event,
				eth_event_callback,
				NULL);
		if (ret != 0) {
			TESTPMD_LOG(ERR, "Failed to register callback for "
					"%s event\n", eth_event_desc[event]);
			return -1;
		}
	}

	return 0;
}
/* This function is used by the interrupt thread */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
			     __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "cannot get port by device name %s!\n",
				device_name);
			return;
		}
		/*
		 * Because the user's callback is invoked from the EAL
		 * interrupt callback, and the interrupt callback must finish
		 * before it can be unregistered when detaching a device, the
		 * callback has to return quickly and detach the device via a
		 * deferred removal instead. This is a workaround: once device
		 * detaching moves into the EAL, the deferred removal can be
		 * dropped.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finish kernel driver binding,
		 * begin to attach port.
		 */
		break;
	default:
		break;
	}
}
static int
set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}
static int
set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}
static void
map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	uint16_t qid;
	uint64_t offloads;

	for (qid = 0; qid < nb_rxq; qid++) {
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;
		if (offloads != 0)
			port->rx_conf[qid].offloads = offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		if (offloads != 0)
			port->tx_conf[qid].offloads = offloads;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}
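/*
 * RTE_PMD_PARAM_UNSET acts as a sentinel above: queue configuration starts
 * from the PMD's default_rxconf/default_txconf, and a threshold taken from
 * the command line overrides the default only when the user actually set it.
 */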
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			return;

		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		if (ret != 0)
			return;

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	if ((rte_eth_devices[slave_pid].data->dev_flags &
	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
		return 1;
	return 0;
}
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
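/*
 * In DCB+VT mode, get_eth_dcb_conf() below maps each VLAN tag above to a
 * VMDq pool with "1 << (i % nb_queue_pools)": 4 TCs give 32 pools (one VLAN
 * per pool), 8 TCs give 16 pools (two VLANs per pool).
 */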
static int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
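/*
 * Minimal usage sketch (hypothetical helper, not called by testpmd; it
 * mirrors what the "port config 0 dcb vt off 4 pfc on" command ends up
 * doing): build a DCB configuration with 4 traffic classes and PFC enabled
 * for port 0.
 */
static __rte_unused int
example_build_dcb_conf(struct rte_eth_conf *conf)
{
	memset(conf, 0, sizeof(*conf));
	/* DCB without virtualization, 4 TCs, PFC enabled (pfc_en = 1). */
	return get_eth_dcb_conf(0, conf, DCB_ENABLED, ETH_4_TCS, 1);
}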
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	port_conf.rxmode = rte_port->dev_conf.rxmode;
	port_conf.txmode = rte_port->dev_conf.txmode;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/* re-configure the device */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
	if (retval < 0)
		return retval;

	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
	if (retval != 0)
		return retval;

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
	if (retval != 0)
		return retval;

	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}

	/* Initialize ports NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}
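/*
 * NUMA_NO_CONFIG is the "unset" sentinel for the per-port socket arrays:
 * start_port() and the mempool setup only honour port_numa/rxring_numa/
 * txring_numa entries that differ from it, so memset-ing the arrays here
 * means "no user override".
 */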
static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);

	fflush(stdout);
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		/* Set flag to indicate the forced termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		rte_exit(EXIT_FAILURE,
			 "Secondary process type not supported.\n");

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				"using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully "
		       "tested.\n", nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to enable hotplug handling.");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to start device event monitoring.");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to register device event callback\n");
			return -1;
		}
	}

	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
				port_id, rte_strerror(-ret));
	}

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, starting packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	ret = rte_eal_cleanup();
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			 "EAL cleanup failed: %s\n", strerror(-ret));

	return EXIT_SUCCESS;
}