1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
67 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
68 #define HUGE_FLAG (0x40000)
70 #define HUGE_FLAG MAP_HUGETLB
73 #ifndef MAP_HUGE_SHIFT
74 /* older kernels (or FreeBSD) will not have this define */
75 #define HUGE_SHIFT (26)
77 #define HUGE_SHIFT MAP_HUGE_SHIFT
80 #define EXTMEM_HEAP_NAME "extmem"
82 uint16_t verbose_level = 0; /**< Silent by default. */
83 int testpmd_logtype; /**< Log type for testpmd logs */
85 /* use master core for command line ? */
86 uint8_t interactive = 0;
87 uint8_t auto_start = 0;
89 char cmdline_filename[PATH_MAX] = {0};
92 * NUMA support configuration.
93 * When set, the NUMA support attempts to dispatch the allocation of the
94 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
95 * probed ports among the CPU sockets 0 and 1.
96 * Otherwise, all memory is allocated from CPU socket 0.
98 uint8_t numa_support = 1; /**< numa enabled by default */
101 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
104 uint8_t socket_num = UMA_NO_CONFIG;
107 * Select mempool allocation type:
108 * - native: use regular DPDK memory
109 * - anon: use regular DPDK memory to create mempool, but populate using
110 * anonymous memory (may not be IOVA-contiguous)
111 * - xmem: use externally allocated hugepage memory
113 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
116 * Store specified sockets on which memory pool to be used by ports
119 uint8_t port_numa[RTE_MAX_ETHPORTS];
122 * Store specified sockets on which RX ring to be used by ports
125 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
128 * Store specified sockets on which TX ring to be used by ports
131 uint8_t txring_numa[RTE_MAX_ETHPORTS];
134 * Record the Ethernet address of peer target ports to which packets are
136 * Must be instantiated with the ethernet addresses of peer traffic generator
139 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
140 portid_t nb_peer_eth_addrs = 0;
143 * Probed Target Environment.
145 struct rte_port *ports; /**< For all probed ethernet ports. */
146 portid_t nb_ports; /**< Number of probed ethernet ports. */
147 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
148 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
150 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
153 * Test Forwarding Configuration.
154 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
155 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
157 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
158 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
159 portid_t nb_cfg_ports; /**< Number of configured ports. */
160 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
162 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
163 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
165 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
166 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
169 * Forwarding engines.
171 struct fwd_engine * fwd_engines[] = {
181 #if defined RTE_LIBRTE_PMD_SOFTNIC
184 #ifdef RTE_LIBRTE_IEEE1588
185 &ieee1588_fwd_engine,
190 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
191 uint16_t mempool_flags;
193 struct fwd_config cur_fwd_config;
194 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
195 uint32_t retry_enabled;
196 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
197 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
199 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
200 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
201 * specified on command-line. */
202 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
205 * In container, it cannot terminate the process which is running with 'stats-period'
206 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
211 * Configuration of packet segments used by the "txonly" processing engine.
213 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
214 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
215 TXONLY_DEF_PACKET_LEN,
217 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
219 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
220 /**< Split policy for packets to TX. */
222 uint8_t txonly_multi_flow;
223 /**< Whether multiple flows are generated in TXONLY mode. */
225 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
226 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
228 /* current configuration is in DCB or not, 0 means it is not in DCB mode */
229 uint8_t dcb_config = 0;
231 /* Whether the dcb is in testing status */
232 uint8_t dcb_test = 0;
235 * Configurable number of RX/TX queues.
237 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
238 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
239 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
242 * Configurable number of RX/TX ring descriptors.
243 * Defaults are supplied by drivers via ethdev.
245 #define RTE_TEST_RX_DESC_DEFAULT 0
246 #define RTE_TEST_TX_DESC_DEFAULT 0
247 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
248 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
250 #define RTE_PMD_PARAM_UNSET -1
252 * Configurable values of RX and TX ring threshold registers.
255 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
256 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
257 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
259 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
260 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
261 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
264 * Configurable value of RX free threshold.
266 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
269 * Configurable value of RX drop enable.
271 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
274 * Configurable value of TX free threshold.
276 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
279 * Configurable value of TX RS bit threshold.
281 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
284 * Configurable value of buffered packets before sending.
286 uint16_t noisy_tx_sw_bufsz;
289 * Configurable value of packet buffer timeout.
291 uint16_t noisy_tx_sw_buf_flush_time;
294 * Configurable value for size of VNF internal memory area
295 * used for simulating noisy neighbour behaviour
297 uint64_t noisy_lkup_mem_sz;
300 * Configurable value of number of random writes done in
301 * VNF simulation memory area.
303 uint64_t noisy_lkup_num_writes;
306 * Configurable value of number of random reads done in
307 * VNF simulation memory area.
309 uint64_t noisy_lkup_num_reads;
312 * Configurable value of number of random reads/writes done in
313 * VNF simulation memory area.
315 uint64_t noisy_lkup_num_reads_writes;
318 * Receive Side Scaling (RSS) configuration.
320 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
323 * Port topology configuration
325 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
328 * Avoids flushing all the RX streams before starting forwarding.
330 uint8_t no_flush_rx = 0; /* flush by default */
333 * Flow API isolated mode.
335 uint8_t flow_isolate_all;
338 * Avoids checking link status when starting/stopping a port.
340 uint8_t no_link_check = 0; /* check by default */
343 * Don't automatically start all ports in interactive mode.
345 uint8_t no_device_start = 0;
348 * Enable link status change notification
350 uint8_t lsc_interrupt = 1; /* enabled by default */
353 * Enable device removal notification.
355 uint8_t rmv_interrupt = 1; /* enabled by default */
357 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
359 /* After attach, port setup is called on event or by iterator */
360 bool setup_on_probe_event = true;
362 /* Pretty printing of ethdev events */
363 static const char * const eth_event_desc[] = {
364 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
365 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
366 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
367 [RTE_ETH_EVENT_INTR_RESET] = "reset",
368 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
369 [RTE_ETH_EVENT_IPSEC] = "IPsec",
370 [RTE_ETH_EVENT_MACSEC] = "MACsec",
371 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
372 [RTE_ETH_EVENT_NEW] = "device probed",
373 [RTE_ETH_EVENT_DESTROY] = "device released",
374 [RTE_ETH_EVENT_MAX] = NULL,
378 * Display or mask ether events
379 * Default to all events except VF_MBOX
381 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
382 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
383 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
384 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
385 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
386 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
387 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
389 * Decide if all memory are locked for performance.
394 * NIC bypass mode configuration options.
397 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
398 /* The NIC bypass watchdog timeout. */
399 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
403 #ifdef RTE_LIBRTE_LATENCY_STATS
406 * Set when latency stats is enabled in the commandline
408 uint8_t latencystats_enabled;
411 * Lcore ID to service latency statistics.
413 lcoreid_t latencystats_lcore_id = -1;
418 * Ethernet device configuration.
420 struct rte_eth_rxmode rx_mode = {
421 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
422 /**< Default maximum frame length. */
425 struct rte_eth_txmode tx_mode = {
426 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
429 struct rte_fdir_conf fdir_conf = {
430 .mode = RTE_FDIR_MODE_NONE,
431 .pballoc = RTE_FDIR_PBALLOC_64K,
432 .status = RTE_FDIR_REPORT_STATUS,
434 .vlan_tci_mask = 0xFFEF,
436 .src_ip = 0xFFFFFFFF,
437 .dst_ip = 0xFFFFFFFF,
440 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
441 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
443 .src_port_mask = 0xFFFF,
444 .dst_port_mask = 0xFFFF,
445 .mac_addr_byte_mask = 0xFF,
446 .tunnel_type_mask = 1,
447 .tunnel_id_mask = 0xFFFFFFFF,
452 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
454 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
455 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
457 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
458 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
460 uint16_t nb_tx_queue_stats_mappings = 0;
461 uint16_t nb_rx_queue_stats_mappings = 0;
464 * Display zero values by default for xstats
466 uint8_t xstats_hide_zero;
468 unsigned int num_sockets = 0;
469 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
471 #ifdef RTE_LIBRTE_BITRATE
472 /* Bitrate statistics */
473 struct rte_stats_bitrates *bitrate_data;
474 lcoreid_t bitrate_lcore_id;
475 uint8_t bitrate_enabled;
478 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
479 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
481 /* Forward function declarations */
482 static void setup_attached_port(portid_t pi);
483 static void map_port_queue_stats_mapping_registers(portid_t pi,
484 struct rte_port *port);
485 static void check_all_ports_link_status(uint32_t port_mask);
486 static int eth_event_callback(portid_t port_id,
487 enum rte_eth_event_type type,
488 void *param, void *ret_param);
489 static void dev_event_callback(const char *device_name,
490 enum rte_dev_event_type type,
494 * Check if all the ports are started.
495 * If yes, return positive value. If not, return zero.
497 static int all_ports_started(void);
499 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
500 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
503 * Helper function to check if socket is already discovered.
504 * If yes, return positive value. If not, return zero.
507 new_socket_id(unsigned int socket_id)
511 for (i = 0; i < num_sockets; i++) {
512 if (socket_ids[i] == socket_id)
519 * Setup default configuration.
522 set_default_fwd_lcores_config(void)
526 unsigned int sock_num;
529 for (i = 0; i < RTE_MAX_LCORE; i++) {
530 if (!rte_lcore_is_enabled(i))
532 sock_num = rte_lcore_to_socket_id(i);
533 if (new_socket_id(sock_num)) {
534 if (num_sockets >= RTE_MAX_NUMA_NODES) {
535 rte_exit(EXIT_FAILURE,
536 "Total sockets greater than %u\n",
539 socket_ids[num_sockets++] = sock_num;
541 if (i == rte_get_master_lcore())
543 fwd_lcores_cpuids[nb_lc++] = i;
545 nb_lcores = (lcoreid_t) nb_lc;
546 nb_cfg_lcores = nb_lcores;
551 set_def_peer_eth_addrs(void)
555 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
556 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
557 peer_eth_addrs[i].addr_bytes[5] = i;
562 set_default_fwd_ports_config(void)
567 RTE_ETH_FOREACH_DEV(pt_id) {
568 fwd_ports_ids[i++] = pt_id;
570 /* Update sockets info according to the attached device */
571 int socket_id = rte_eth_dev_socket_id(pt_id);
572 if (socket_id >= 0 && new_socket_id(socket_id)) {
573 if (num_sockets >= RTE_MAX_NUMA_NODES) {
574 rte_exit(EXIT_FAILURE,
575 "Total sockets greater than %u\n",
578 socket_ids[num_sockets++] = socket_id;
582 nb_cfg_ports = nb_ports;
583 nb_fwd_ports = nb_ports;
587 set_def_fwd_config(void)
589 set_default_fwd_lcores_config();
590 set_def_peer_eth_addrs();
591 set_default_fwd_ports_config();
594 /* extremely pessimistic estimation of memory required to create a mempool */
596 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
598 unsigned int n_pages, mbuf_per_pg, leftover;
599 uint64_t total_mem, mbuf_mem, obj_sz;
601 /* there is no good way to predict how much space the mempool will
602 * occupy because it will allocate chunks on the fly, and some of those
603 * will come from default DPDK memory while some will come from our
604 * external memory, so just assume 128MB will be enough for everyone.
606 uint64_t hdr_mem = 128 << 20;
608 /* account for possible non-contiguousness */
609 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
611 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
615 mbuf_per_pg = pgsz / obj_sz;
616 leftover = (nb_mbufs % mbuf_per_pg) > 0;
617 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
619 mbuf_mem = n_pages * pgsz;
621 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
623 if (total_mem > SIZE_MAX) {
624 TESTPMD_LOG(ERR, "Memory size too big\n");
627 *out = (size_t)total_mem;
633 pagesz_flags(uint64_t page_sz)
635 /* as per mmap() manpage, all page sizes are log2 of page size
636 * shifted by MAP_HUGE_SHIFT
638 int log2 = rte_log2_u64(page_sz);
640 return (log2 << HUGE_SHIFT);
644 alloc_mem(size_t memsz, size_t pgsz, bool huge)
649 /* allocate anonymous hugepages */
650 flags = MAP_ANONYMOUS | MAP_PRIVATE;
652 flags |= HUGE_FLAG | pagesz_flags(pgsz);
654 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
655 if (addr == MAP_FAILED)
661 struct extmem_param {
665 rte_iova_t *iova_table;
666 unsigned int iova_table_len;
670 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
673 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
674 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
675 unsigned int cur_page, n_pages, pgsz_idx;
676 size_t mem_sz, cur_pgsz;
677 rte_iova_t *iovas = NULL;
681 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
682 /* skip anything that is too big */
683 if (pgsizes[pgsz_idx] > SIZE_MAX)
686 cur_pgsz = pgsizes[pgsz_idx];
688 /* if we were told not to allocate hugepages, override */
690 cur_pgsz = sysconf(_SC_PAGESIZE);
692 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
694 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
698 /* allocate our memory */
699 addr = alloc_mem(mem_sz, cur_pgsz, huge);
701 /* if we couldn't allocate memory with a specified page size,
702 * that doesn't mean we can't do it with other page sizes, so
708 /* store IOVA addresses for every page in this memory area */
709 n_pages = mem_sz / cur_pgsz;
711 iovas = malloc(sizeof(*iovas) * n_pages);
714 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
717 /* lock memory if it's not huge pages */
721 /* populate IOVA addresses */
722 for (cur_page = 0; cur_page < n_pages; cur_page++) {
727 offset = cur_pgsz * cur_page;
728 cur = RTE_PTR_ADD(addr, offset);
730 /* touch the page before getting its IOVA */
731 *(volatile char *)cur = 0;
733 iova = rte_mem_virt2iova(cur);
735 iovas[cur_page] = iova;
740 /* if we couldn't allocate anything */
746 param->pgsz = cur_pgsz;
747 param->iova_table = iovas;
748 param->iova_table_len = n_pages;
755 munmap(addr, mem_sz);
761 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
763 struct extmem_param param;
766 memset(¶m, 0, sizeof(param));
768 /* check if our heap exists */
769 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
771 /* create our heap */
772 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
774 TESTPMD_LOG(ERR, "Cannot create heap\n");
779 ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge);
781 TESTPMD_LOG(ERR, "Cannot create memory area\n");
785 /* we now have a valid memory area, so add it to heap */
786 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
787 param.addr, param.len, param.iova_table,
788 param.iova_table_len, param.pgsz);
790 /* when using VFIO, memory is automatically mapped for DMA by EAL */
792 /* not needed any more */
793 free(param.iova_table);
796 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
797 munmap(param.addr, param.len);
803 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
809 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
810 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
815 RTE_ETH_FOREACH_DEV(pid) {
816 struct rte_eth_dev *dev =
817 &rte_eth_devices[pid];
819 ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
823 "unable to DMA unmap addr 0x%p "
825 memhdr->addr, dev->data->name);
828 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
831 "unable to un-register addr 0x%p\n", memhdr->addr);
836 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
837 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
840 size_t page_size = sysconf(_SC_PAGESIZE);
843 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
847 "unable to register addr 0x%p\n", memhdr->addr);
850 RTE_ETH_FOREACH_DEV(pid) {
851 struct rte_eth_dev *dev =
852 &rte_eth_devices[pid];
854 ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
858 "unable to DMA map addr 0x%p "
860 memhdr->addr, dev->data->name);
866 * Configuration initialisation done once at init time.
868 static struct rte_mempool *
869 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
870 unsigned int socket_id)
872 char pool_name[RTE_MEMPOOL_NAMESIZE];
873 struct rte_mempool *rte_mp = NULL;
876 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
877 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
880 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
881 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
883 switch (mp_alloc_type) {
884 case MP_ALLOC_NATIVE:
886 /* wrapper to rte_mempool_create() */
887 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
888 rte_mbuf_best_mempool_ops());
889 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
890 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
895 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
896 mb_size, (unsigned int) mb_mempool_cache,
897 sizeof(struct rte_pktmbuf_pool_private),
898 socket_id, mempool_flags);
902 if (rte_mempool_populate_anon(rte_mp) == 0) {
903 rte_mempool_free(rte_mp);
907 rte_pktmbuf_pool_init(rte_mp, NULL);
908 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
909 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
913 case MP_ALLOC_XMEM_HUGE:
916 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
918 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
919 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
922 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
924 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
926 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
927 rte_mbuf_best_mempool_ops());
928 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
929 mb_mempool_cache, 0, mbuf_seg_size,
935 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
940 if (rte_mp == NULL) {
941 rte_exit(EXIT_FAILURE,
942 "Creation of mbuf pool for socket %u failed: %s\n",
943 socket_id, rte_strerror(rte_errno));
944 } else if (verbose_level > 0) {
945 rte_mempool_dump(stdout, rte_mp);
951 * Check given socket id is valid or not with NUMA mode,
952 * if valid, return 0, else return -1
955 check_socket_id(const unsigned int socket_id)
957 static int warning_once = 0;
959 if (new_socket_id(socket_id)) {
960 if (!warning_once && numa_support)
961 printf("Warning: NUMA should be configured manually by"
962 " using --port-numa-config and"
963 " --ring-numa-config parameters along with"
972 * Get the allowed maximum number of RX queues.
973 * *pid return the port id which has minimal value of
974 * max_rx_queues in all ports.
977 get_allowed_max_nb_rxq(portid_t *pid)
979 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
980 bool max_rxq_valid = false;
982 struct rte_eth_dev_info dev_info;
984 RTE_ETH_FOREACH_DEV(pi) {
985 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
988 max_rxq_valid = true;
989 if (dev_info.max_rx_queues < allowed_max_rxq) {
990 allowed_max_rxq = dev_info.max_rx_queues;
994 return max_rxq_valid ? allowed_max_rxq : 0;
998 * Check input rxq is valid or not.
999 * If input rxq is not greater than any of maximum number
1000 * of RX queues of all ports, it is valid.
1001 * if valid, return 0, else return -1
1004 check_nb_rxq(queueid_t rxq)
1006 queueid_t allowed_max_rxq;
1009 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1010 if (rxq > allowed_max_rxq) {
1011 printf("Fail: input rxq (%u) can't be greater "
1012 "than max_rx_queues (%u) of port %u\n",
1022 * Get the allowed maximum number of TX queues.
1023 * *pid return the port id which has minimal value of
1024 * max_tx_queues in all ports.
1027 get_allowed_max_nb_txq(portid_t *pid)
1029 queueid_t allowed_max_txq = MAX_QUEUE_ID;
1030 bool max_txq_valid = false;
1032 struct rte_eth_dev_info dev_info;
1034 RTE_ETH_FOREACH_DEV(pi) {
1035 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1038 max_txq_valid = true;
1039 if (dev_info.max_tx_queues < allowed_max_txq) {
1040 allowed_max_txq = dev_info.max_tx_queues;
1044 return max_txq_valid ? allowed_max_txq : 0;
1048 * Check input txq is valid or not.
1049 * If input txq is not greater than any of maximum number
1050 * of TX queues of all ports, it is valid.
1051 * if valid, return 0, else return -1
1054 check_nb_txq(queueid_t txq)
1056 queueid_t allowed_max_txq;
1059 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1060 if (txq > allowed_max_txq) {
1061 printf("Fail: input txq (%u) can't be greater "
1062 "than max_tx_queues (%u) of port %u\n",
1072 * Get the allowed maximum number of hairpin queues.
1073 * *pid return the port id which has minimal value of
1074 * max_hairpin_queues in all ports.
1077 get_allowed_max_nb_hairpinq(portid_t *pid)
1079 queueid_t allowed_max_hairpinq = MAX_QUEUE_ID;
1081 struct rte_eth_hairpin_cap cap;
1083 RTE_ETH_FOREACH_DEV(pi) {
1084 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1088 if (cap.max_nb_queues < allowed_max_hairpinq) {
1089 allowed_max_hairpinq = cap.max_nb_queues;
1093 return allowed_max_hairpinq;
1097 * Check input hairpin is valid or not.
1098 * If input hairpin is not greater than any of maximum number
1099 * of hairpin queues of all ports, it is valid.
1100 * if valid, return 0, else return -1
1103 check_nb_hairpinq(queueid_t hairpinq)
1105 queueid_t allowed_max_hairpinq;
1108 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1109 if (hairpinq > allowed_max_hairpinq) {
1110 printf("Fail: input hairpin (%u) can't be greater "
1111 "than max_hairpin_queues (%u) of port %u\n",
1112 hairpinq, allowed_max_hairpinq, pid);
1122 struct rte_port *port;
1123 struct rte_mempool *mbp;
1124 unsigned int nb_mbuf_per_pool;
1126 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1127 struct rte_gro_param gro_param;
1134 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
1136 /* Configuration of logical cores. */
1137 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1138 sizeof(struct fwd_lcore *) * nb_lcores,
1139 RTE_CACHE_LINE_SIZE);
1140 if (fwd_lcores == NULL) {
1141 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1142 "failed\n", nb_lcores);
1144 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1145 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1146 sizeof(struct fwd_lcore),
1147 RTE_CACHE_LINE_SIZE);
1148 if (fwd_lcores[lc_id] == NULL) {
1149 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1152 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1155 RTE_ETH_FOREACH_DEV(pid) {
1157 /* Apply default TxRx configuration for all ports */
1158 port->dev_conf.txmode = tx_mode;
1159 port->dev_conf.rxmode = rx_mode;
1161 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1163 rte_exit(EXIT_FAILURE,
1164 "rte_eth_dev_info_get() failed\n");
1166 if (!(port->dev_info.tx_offload_capa &
1167 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1168 port->dev_conf.txmode.offloads &=
1169 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1171 if (port_numa[pid] != NUMA_NO_CONFIG)
1172 port_per_socket[port_numa[pid]]++;
1174 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1177 * if socket_id is invalid,
1178 * set to the first available socket.
1180 if (check_socket_id(socket_id) < 0)
1181 socket_id = socket_ids[0];
1182 port_per_socket[socket_id]++;
1186 /* Apply Rx offloads configuration */
1187 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1188 port->rx_conf[k].offloads =
1189 port->dev_conf.rxmode.offloads;
1190 /* Apply Tx offloads configuration */
1191 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1192 port->tx_conf[k].offloads =
1193 port->dev_conf.txmode.offloads;
1195 /* set flag to initialize port/queue */
1196 port->need_reconfig = 1;
1197 port->need_reconfig_queues = 1;
1198 port->tx_metadata = 0;
1200 /* Check for maximum number of segments per MTU. Accordingly
1201 * update the mbuf data size.
1203 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1204 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1205 data_size = rx_mode.max_rx_pkt_len /
1206 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1208 if ((data_size + RTE_PKTMBUF_HEADROOM) >
1210 mbuf_data_size = data_size +
1211 RTE_PKTMBUF_HEADROOM;
1218 TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
1222 * Create pools of mbuf.
1223 * If NUMA support is disabled, create a single pool of mbuf in
1224 * socket 0 memory by default.
1225 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1227 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1228 * nb_txd can be configured at run time.
1230 if (param_total_num_mbufs)
1231 nb_mbuf_per_pool = param_total_num_mbufs;
1233 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1234 (nb_lcores * mb_mempool_cache) +
1235 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1236 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1242 for (i = 0; i < num_sockets; i++)
1243 mempools[i] = mbuf_pool_create(mbuf_data_size,
1247 if (socket_num == UMA_NO_CONFIG)
1248 mempools[0] = mbuf_pool_create(mbuf_data_size,
1249 nb_mbuf_per_pool, 0);
1251 mempools[socket_num] = mbuf_pool_create
1259 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1260 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1262 * Records which Mbuf pool to use by each logical core, if needed.
1264 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1265 mbp = mbuf_pool_find(
1266 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1269 mbp = mbuf_pool_find(0);
1270 fwd_lcores[lc_id]->mbp = mbp;
1271 /* initialize GSO context */
1272 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1273 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1274 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1275 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1277 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1280 /* Configuration of packet forwarding streams. */
1281 if (init_fwd_streams() < 0)
1282 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1286 /* create a gro context for each lcore */
1287 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1288 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1289 gro_param.max_item_per_flow = MAX_PKT_BURST;
1290 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1291 gro_param.socket_id = rte_lcore_to_socket_id(
1292 fwd_lcores_cpuids[lc_id]);
1293 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1294 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1295 rte_exit(EXIT_FAILURE,
1296 "rte_gro_ctx_create() failed\n");
1300 #if defined RTE_LIBRTE_PMD_SOFTNIC
1301 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1302 RTE_ETH_FOREACH_DEV(pid) {
1304 const char *driver = port->dev_info.driver_name;
1306 if (strcmp(driver, "net_softnic") == 0)
1307 port->softport.fwd_lcore_arg = fwd_lcores;
1316 reconfig(portid_t new_port_id, unsigned socket_id)
1318 struct rte_port *port;
1321 /* Reconfiguration of Ethernet ports. */
1322 port = &ports[new_port_id];
1324 ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
1328 /* set flag to initialize port/queue */
1329 port->need_reconfig = 1;
1330 port->need_reconfig_queues = 1;
1331 port->socket_id = socket_id;
/*
 * (Re)allocate the global fwd_streams[] array.
 * Per port: validate nb_rxq/nb_txq against the device maxima and decide
 * the port's NUMA socket (per-port override, device socket, or UMA).
 * Then size the stream array as nb_ports * max(nb_rxq, nb_txq), freeing
 * the previous allocation first if the count changed.
 * NOTE(review): several lines (returns, else branches, braces) are
 * elided in this view.
 */
1338 init_fwd_streams(void)
1341 struct rte_port *port;
1342 streamid_t sm_id, nb_fwd_streams_new;
1345 /* set socket id according to numa or not */
1346 RTE_ETH_FOREACH_DEV(pid) {
1348 if (nb_rxq > port->dev_info.max_rx_queues) {
1349 printf("Fail: nb_rxq(%d) is greater than "
1350 "max_rx_queues(%d)\n", nb_rxq,
1351 port->dev_info.max_rx_queues);
1354 if (nb_txq > port->dev_info.max_tx_queues) {
1355 printf("Fail: nb_txq(%d) is greater than "
1356 "max_tx_queues(%d)\n", nb_txq,
1357 port->dev_info.max_tx_queues);
/* NUMA mode: honor a per-port --port-numa-config override first. */
1361 if (port_numa[pid] != NUMA_NO_CONFIG)
1362 port->socket_id = port_numa[pid];
1364 port->socket_id = rte_eth_dev_socket_id(pid);
1367 * if socket_id is invalid,
1368 * set to the first available socket.
1370 if (check_socket_id(port->socket_id) < 0)
1371 port->socket_id = socket_ids[0];
/* UMA mode: everything on socket 0 unless --socket-num was given. */
1375 if (socket_num == UMA_NO_CONFIG)
1376 port->socket_id = 0;
1378 port->socket_id = socket_num;
1382 q = RTE_MAX(nb_rxq, nb_txq);
1384 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1387 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
/* Nothing to do when the stream count is unchanged. */
1388 if (nb_fwd_streams_new == nb_fwd_streams)
/* Free the old stream array (and each stream) before resizing. */
1391 if (fwd_streams != NULL) {
1392 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1393 if (fwd_streams[sm_id] == NULL)
1395 rte_free(fwd_streams[sm_id]);
1396 fwd_streams[sm_id] = NULL;
1398 rte_free(fwd_streams);
1403 nb_fwd_streams = nb_fwd_streams_new;
1404 if (nb_fwd_streams) {
1405 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1406 sizeof(struct fwd_stream *) * nb_fwd_streams,
1407 RTE_CACHE_LINE_SIZE);
1408 if (fwd_streams == NULL)
1409 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1410 " (struct fwd_stream *)) failed\n",
1413 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1414 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1415 " struct fwd_stream", sizeof(struct fwd_stream),
1416 RTE_CACHE_LINE_SIZE);
1417 if (fwd_streams[sm_id] == NULL)
1418 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1419 "(struct fwd_stream) failed\n");
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a compact histogram summary of RX or TX burst sizes recorded in
 * @pbs: total number of bursts, plus the two most frequent burst sizes
 * with their percentages (remaining mass reported as "others").
 * @rx_tx is the "RX"/"TX" label for the printed line.
 */
1428 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1430 unsigned int total_burst;
1431 unsigned int nb_burst;
1432 unsigned int burst_stats[3];
1433 uint16_t pktnb_stats[3];
1435 int burst_percent[3];
1438 * First compute the total number of packet bursts and the
1439 * two highest numbers of bursts of the same number of packets.
1442 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1443 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1444 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1445 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1448 total_burst += nb_burst;
/* Keep a running top-2: index 0 is the most frequent burst size. */
1449 if (nb_burst > burst_stats[0]) {
1450 burst_stats[1] = burst_stats[0];
1451 pktnb_stats[1] = pktnb_stats[0];
1452 burst_stats[0] = nb_burst;
1453 pktnb_stats[0] = nb_pkt;
1454 } else if (nb_burst > burst_stats[1]) {
1455 burst_stats[1] = nb_burst;
1456 pktnb_stats[1] = nb_pkt;
/* No bursts recorded: nothing to print. */
1459 if (total_burst == 0)
1461 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1462 printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1463 burst_percent[0], (int) pktnb_stats[0]);
1464 if (burst_stats[0] == total_burst) {
1468 if (burst_stats[0] + burst_stats[1] == total_burst) {
1469 printf(" + %d%% of %d pkts]\n",
1470 100 - burst_percent[0], pktnb_stats[1]);
1473 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1474 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1475 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1476 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1479 printf(" + %d%% of %d pkts + %d%% of others]\n",
1480 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1482 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print per-stream forwarding statistics for fwd_streams[stream_id]:
 * RX/TX packet counts and drops, checksum-error counters when the csum
 * engine is active, and (when compiled in) burst-size histograms.
 * Silent when the stream saw no traffic at all.
 */
1485 fwd_stream_stats_display(streamid_t stream_id)
1487 struct fwd_stream *fs;
1488 static const char *fwd_top_stats_border = "-------";
1490 fs = fwd_streams[stream_id];
/* Skip idle streams: nothing received, sent, or dropped. */
1491 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1492 (fs->fwd_dropped == 0))
1494 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1495 "TX Port=%2d/Queue=%2d %s\n",
1496 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1497 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1498 printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1499 " TX-dropped: %-14"PRIu64,
1500 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1502 /* if checksum mode */
1503 if (cur_fwd_eng == &csum_fwd_engine) {
1504 printf("  RX- bad IP checksum: %-14"PRIu64
1505 " Rx- bad L4 checksum: %-14"PRIu64
1506 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1507 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1508 fs->rx_bad_outer_l4_csum);
1513 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1514 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1515 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Display the forwarding statistics for every forwarding port plus an
 * accumulated total across all ports.
 * Software counters from each stream are first aggregated into a local
 * per-port table; hardware counters come from rte_eth_stats_get() with
 * the snapshot taken at fwd_stats_reset() subtracted, so the output
 * reflects only the last forwarding run.
 * NOTE(review): declaration of the anonymous per-port struct and various
 * closing braces are elided in this view.
 */
1520 fwd_stats_display(void)
1522 static const char *fwd_stats_border = "----------------------";
1523 static const char *acc_stats_border = "+++++++++++++++";
1525 struct fwd_stream *rx_stream;
1526 struct fwd_stream *tx_stream;
1527 uint64_t tx_dropped;
1528 uint64_t rx_bad_ip_csum;
1529 uint64_t rx_bad_l4_csum;
1530 uint64_t rx_bad_outer_l4_csum;
1531 } ports_stats[RTE_MAX_ETHPORTS];
1532 uint64_t total_rx_dropped = 0;
1533 uint64_t total_tx_dropped = 0;
1534 uint64_t total_rx_nombuf = 0;
1535 struct rte_eth_stats stats;
1536 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1537 uint64_t fwd_cycles = 0;
1539 uint64_t total_recv = 0;
1540 uint64_t total_xmit = 0;
1541 struct rte_port *port;
1546 memset(ports_stats, 0, sizeof(ports_stats));
/* Pass 1: fold every stream's software counters into its ports. */
1548 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1549 struct fwd_stream *fs = fwd_streams[sm_id];
/* With more streams than ports, also show per-stream details. */
1551 if (cur_fwd_config.nb_fwd_streams >
1552 cur_fwd_config.nb_fwd_ports) {
1553 fwd_stream_stats_display(sm_id);
1555 ports_stats[fs->tx_port].tx_stream = fs;
1556 ports_stats[fs->rx_port].rx_stream = fs;
1559 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1561 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1562 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1563 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1564 fs->rx_bad_outer_l4_csum;
1566 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1567 fwd_cycles += fs->core_cycles;
/* Pass 2: per-port hardware stats, relative to the reset snapshot. */
1570 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1573 pt_id = fwd_ports_ids[i];
1574 port = &ports[pt_id];
1576 rte_eth_stats_get(pt_id, &stats);
1577 stats.ipackets -= port->stats.ipackets;
1578 stats.opackets -= port->stats.opackets;
1579 stats.ibytes -= port->stats.ibytes;
1580 stats.obytes -= port->stats.obytes;
1581 stats.imissed -= port->stats.imissed;
1582 stats.oerrors -= port->stats.oerrors;
1583 stats.rx_nombuf -= port->stats.rx_nombuf;
1585 total_recv += stats.ipackets;
1586 total_xmit += stats.opackets;
1587 total_rx_dropped += stats.imissed;
1588 total_tx_dropped += ports_stats[pt_id].tx_dropped;
1589 total_tx_dropped += stats.oerrors;
1590 total_rx_nombuf += stats.rx_nombuf;
1592 printf("\n  %s Forward statistics for port %-2d %s\n",
1593 fwd_stats_border, pt_id, fwd_stats_border);
/* Two print layouts: plain vs. per-queue stats-register mapping. */
1595 if (!port->rx_queue_stats_mapping_enabled &&
1596 !port->tx_queue_stats_mapping_enabled) {
1597 printf("  RX-packets: %-14"PRIu64
1598 " RX-dropped: %-14"PRIu64
1599 "RX-total: %-"PRIu64"\n",
1600 stats.ipackets, stats.imissed,
1601 stats.ipackets + stats.imissed);
1603 if (cur_fwd_eng == &csum_fwd_engine)
1604 printf("  Bad-ipcsum: %-14"PRIu64
1605 " Bad-l4csum: %-14"PRIu64
1606 "Bad-outer-l4csum: %-14"PRIu64"\n",
1607 ports_stats[pt_id].rx_bad_ip_csum,
1608 ports_stats[pt_id].rx_bad_l4_csum,
1609 ports_stats[pt_id].rx_bad_outer_l4_csum);
1610 if (stats.ierrors + stats.rx_nombuf > 0) {
1611 printf("  RX-error: %-"PRIu64"\n",
1613 printf("  RX-nombufs: %-14"PRIu64"\n",
1617 printf("  TX-packets: %-14"PRIu64
1618 " TX-dropped: %-14"PRIu64
1619 "TX-total: %-"PRIu64"\n",
1620 stats.opackets, ports_stats[pt_id].tx_dropped,
1621 stats.opackets + ports_stats[pt_id].tx_dropped);
1623 printf("  RX-packets:             %14"PRIu64
1624 "    RX-dropped:%14"PRIu64
1625 "    RX-total:%14"PRIu64"\n",
1626 stats.ipackets, stats.imissed,
1627 stats.ipackets + stats.imissed);
1629 if (cur_fwd_eng == &csum_fwd_engine)
1630 printf("  Bad-ipcsum:%14"PRIu64
1631 "    Bad-l4csum:%14"PRIu64
1632 "    Bad-outer-l4csum: %-14"PRIu64"\n",
1633 ports_stats[pt_id].rx_bad_ip_csum,
1634 ports_stats[pt_id].rx_bad_l4_csum,
1635 ports_stats[pt_id].rx_bad_outer_l4_csum);
1636 if ((stats.ierrors + stats.rx_nombuf) > 0) {
1637 printf("  RX-error:%"PRIu64"\n", stats.ierrors);
1638 printf("  RX-nombufs:             %14"PRIu64"\n",
1642 printf("  TX-packets:             %14"PRIu64
1643 "    TX-dropped:%14"PRIu64
1644 "    TX-total:%14"PRIu64"\n",
1645 stats.opackets, ports_stats[pt_id].tx_dropped,
1646 stats.opackets + ports_stats[pt_id].tx_dropped);
1649 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1650 if (ports_stats[pt_id].rx_stream)
1651 pkt_burst_stats_display("RX",
1652 &ports_stats[pt_id].rx_stream->rx_burst_stats);
1653 if (ports_stats[pt_id].tx_stream)
1654 pkt_burst_stats_display("TX",
1655 &ports_stats[pt_id].tx_stream->tx_burst_stats);
1658 if (port->rx_queue_stats_mapping_enabled) {
1660 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1661 printf("  Stats reg %2d RX-packets:%14"PRIu64
1662 "     RX-errors:%14"PRIu64
1663 "    RX-bytes:%14"PRIu64"\n",
1664 j, stats.q_ipackets[j],
1665 stats.q_errors[j], stats.q_ibytes[j]);
1669 if (port->tx_queue_stats_mapping_enabled) {
1670 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1671 printf("  Stats reg %2d TX-packets:%14"PRIu64
1674 j, stats.q_opackets[j],
1679 printf("  %s--------------------------------%s\n",
1680 fwd_stats_border, fwd_stats_border);
/* Accumulated totals over all forwarding ports. */
1683 printf("\n  %s Accumulated forward statistics for all ports"
1685 acc_stats_border, acc_stats_border);
1686 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1688 "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1690 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1691 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1692 if (total_rx_nombuf > 0)
1693 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1694 printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1696 acc_stats_border, acc_stats_border);
1697 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1699 printf("\n  CPU cycles/packet=%u (total cycles="
1700 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1701 (unsigned int)(fwd_cycles / total_recv),
1702 fwd_cycles, total_recv);
/*
 * Reset forwarding statistics: snapshot the current hardware counters
 * of every forwarding port into ports[].stats (used as the baseline by
 * fwd_stats_display()) and zero all software per-stream counters.
 */
1707 fwd_stats_reset(void)
1713 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1714 pt_id = fwd_ports_ids[i];
/* Baseline snapshot; later displays subtract this value. */
1715 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
1717 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1718 struct fwd_stream *fs = fwd_streams[sm_id];
1722 fs->fwd_dropped = 0;
1723 fs->rx_bad_ip_csum = 0;
1724 fs->rx_bad_l4_csum = 0;
1725 fs->rx_bad_outer_l4_csum = 0;
1727 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1728 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
1729 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
1731 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1732 fs->core_cycles = 0;
/*
 * Drain all RX queues of every forwarding port so a new forwarding run
 * starts from empty queues. Each queue is polled until rte_eth_rx_burst()
 * returns 0 or a 1-second timeout expires; received mbufs are freed.
 * The whole drain is done twice with a 10 ms pause in between, to catch
 * packets that were in flight during the first pass.
 */
1738 flush_fwd_rx_queues(void)
1740 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1747 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1748 uint64_t timer_period;
1750 /* convert to number of cycles */
1751 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1753 for (j = 0; j < 2; j++) {
1754 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1755 for (rxq = 0; rxq < nb_rxq; rxq++) {
1756 port_id = fwd_ports_ids[rxp];
1758 * testpmd can stuck in the below do while loop
1759 * if rte_eth_rx_burst() always returns nonzero
1760 * packets. So timer is added to exit this loop
1761 * after 1sec timer expiry.
1763 prev_tsc = rte_rdtsc();
1765 nb_rx = rte_eth_rx_burst(port_id, rxq,
1766 pkts_burst, MAX_PKT_BURST);
1767 for (i = 0; i < nb_rx; i++)
1768 rte_pktmbuf_free(pkts_burst[i]);
1770 cur_tsc = rte_rdtsc();
1771 diff_tsc = cur_tsc - prev_tsc;
1772 timer_tsc += diff_tsc;
1773 } while ((nb_rx > 0) &&
1774 (timer_tsc < timer_period));
1778 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main forwarding loop for one logical core: repeatedly invoke @pkt_fwd
 * on every stream assigned to @fc until fc->stopped is set by
 * stop_packet_forwarding(). When enabled at build/run time, this loop
 * also drives the periodic bitrate calculation (once per second, on the
 * designated lcore only) and latency-stats updates.
 */
1783 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1785 struct fwd_stream **fsm;
1788 #ifdef RTE_LIBRTE_BITRATE
1789 uint64_t tics_per_1sec;
1790 uint64_t tics_datum;
1791 uint64_t tics_current;
1792 uint16_t i, cnt_ports;
1794 cnt_ports = nb_ports;
1795 tics_datum = rte_rdtsc();
1796 tics_per_1sec = rte_get_timer_hz();
/* Streams handled by this lcore: a contiguous slice of fwd_streams[]. */
1798 fsm = &fwd_streams[fc->stream_idx];
1799 nb_fs = fc->stream_nb;
1801 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1802 (*pkt_fwd)(fsm[sm_id]);
1803 #ifdef RTE_LIBRTE_BITRATE
1804 if (bitrate_enabled != 0 &&
1805 bitrate_lcore_id == rte_lcore_id()) {
1806 tics_current = rte_rdtsc();
1807 if (tics_current - tics_datum >= tics_per_1sec) {
1808 /* Periodic bitrate calculation */
1809 for (i = 0; i < cnt_ports; i++)
1810 rte_stats_bitrate_calc(bitrate_data,
1812 tics_datum = tics_current;
1816 #ifdef RTE_LIBRTE_LATENCY_STATS
1817 if (latencystats_enabled != 0 &&
1818 latencystats_lcore_id == rte_lcore_id())
1819 rte_latencystats_update();
1822 } while (! fc->stopped);
/*
 * lcore entry point (lcore_function_t): run the currently configured
 * forwarding engine's packet_fwd on this core's fwd_lcore context.
 */
1826 start_pkt_forward_on_core(void *fwd_arg)
1828 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1829 cur_fwd_config.fwd_eng->packet_fwd);
1834 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1835 * Used to start communication flows in network loopback test configurations.
/*
 * lcore entry point: a local copy of the fwd_lcore with stopped=1 makes
 * run_pkt_fwd_on_lcore() execute exactly one iteration (the do/while
 * condition fails immediately), i.e. a single TXONLY burst.
 */
1838 run_one_txonly_burst_on_core(void *fwd_arg)
1840 struct fwd_lcore *fwd_lc;
1841 struct fwd_lcore tmp_lcore;
1843 fwd_lc = (struct fwd_lcore *) fwd_arg;
1844 tmp_lcore = *fwd_lc;
1845 tmp_lcore.stopped = 1;
1846 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1851 * Launch packet forwarding:
1852 * - Setup per-port forwarding context.
1853 * - launch logical cores with their forwarding configuration.
/*
 * Runs the engine's optional port_fwd_begin hook on every forwarding
 * port, then launches @pkt_fwd_on_lcore on each forwarding lcore via
 * rte_eal_remote_launch(). In interactive mode the main lcore is kept
 * free for the command line and is not launched.
 */
1856 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1858 port_fwd_begin_t port_fwd_begin;
1863 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1864 if (port_fwd_begin != NULL) {
1865 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1866 (*port_fwd_begin)(fwd_ports_ids[i]);
1868 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1869 lc_id = fwd_lcores_cpuids[i];
1870 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1871 fwd_lcores[i]->stopped = 0;
1872 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1873 fwd_lcores[i], lc_id);
1875 printf("launch lcore %u failed - diag=%d\n",
1882 * Launch packet forwarding configuration.
/*
 * Validate the current setup (queue counts vs. engine, all ports
 * started, DCB constraints, not already running), flush stale RX
 * packets, display the configuration, then launch forwarding.
 * @with_tx_first: number of initial single TXONLY bursts to send before
 * starting the real engine (used to prime loopback topologies).
 */
1885 start_packet_forwarding(int with_tx_first)
1887 port_fwd_begin_t port_fwd_begin;
1888 port_fwd_end_t port_fwd_end;
1889 struct rte_port *port;
/* Engine/queue sanity: rxonly needs RX queues, txonly needs TX queues,
 * every other engine needs both. */
1893 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1894 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1896 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1897 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1899 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1900 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1901 (!nb_rxq || !nb_txq))
1902 rte_exit(EXIT_FAILURE,
1903 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1904 cur_fwd_eng->fwd_mode_name);
1906 if (all_ports_started() == 0) {
1907 printf("Not all ports were started\n");
1910 if (test_done == 0) {
1911 printf("Packet forwarding already started\n");
/* DCB mode: every forwarding port must be DCB-configured and more than
 * one forwarding core is required. */
1917 for (i = 0; i < nb_fwd_ports; i++) {
1918 pt_id = fwd_ports_ids[i];
1919 port = &ports[pt_id];
1920 if (!port->dcb_flag) {
1921 printf("In DCB mode, all forwarding ports must "
1922 "be configured in this mode.\n");
1926 if (nb_fwd_lcores == 1) {
1927 printf("In DCB mode,the nb forwarding cores "
1928 "should be larger than 1.\n");
1937 flush_fwd_rx_queues();
1939 pkt_fwd_config_display(&cur_fwd_config);
1940 rxtx_config_display();
1943 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1944 pt_id = fwd_ports_ids[i];
1945 port = &ports[pt_id];
1946 map_port_queue_stats_mapping_registers(pt_id, port);
/* Optional priming phase: one TXONLY burst per requested round. */
1948 if (with_tx_first) {
1949 port_fwd_begin = tx_only_engine.port_fwd_begin;
1950 if (port_fwd_begin != NULL) {
1951 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1952 (*port_fwd_begin)(fwd_ports_ids[i]);
1954 while (with_tx_first--) {
1955 launch_packet_forwarding(
1956 run_one_txonly_burst_on_core);
1957 rte_eal_mp_wait_lcore();
1959 port_fwd_end = tx_only_engine.port_fwd_end;
1960 if (port_fwd_end != NULL) {
1961 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1962 (*port_fwd_end)(fwd_ports_ids[i]);
1965 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop a running forwarding session: set every lcore's stopped flag,
 * wait for all worker lcores to return, run the engine's optional
 * port_fwd_end hook per port, and print the accumulated statistics.
 */
1969 stop_packet_forwarding(void)
1971 port_fwd_end_t port_fwd_end;
1977 printf("Packet forwarding not started\n");
1980 printf("Telling cores to stop...");
1981 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1982 fwd_lcores[lc_id]->stopped = 1;
1983 printf("\nWaiting for lcores to finish...\n");
1984 rte_eal_mp_wait_lcore();
1985 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1986 if (port_fwd_end != NULL) {
1987 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1988 pt_id = fwd_ports_ids[i];
1989 (*port_fwd_end)(pt_id);
1993 fwd_stats_display();
1995 printf("\nDone.\n");
/* Bring the link of port @pid up; failures are only reported, not fatal. */
2000 dev_set_link_up(portid_t pid)
2002 if (rte_eth_dev_set_link_up(pid) < 0)
2003 printf("\nSet link up fail.\n");
/* Bring the link of port @pid down; failures are only reported, not fatal. */
2007 dev_set_link_down(portid_t pid)
2009 if (rte_eth_dev_set_link_down(pid) < 0)
2010 printf("\nSet link down fail.\n");
/*
 * Return non-zero when every non-slave port is in RTE_PORT_STARTED
 * state (bonding slave ports are exempt from the check).
 */
2014 all_ports_started(void)
2017 struct rte_port *port;
2019 RTE_ETH_FOREACH_DEV(pi) {
2021 /* Check if there is a port which is not started */
2022 if ((port->port_status != RTE_PORT_STARTED) &&
2023 (port->slave_flag == 0))
2027 /* No port is not started */
/*
 * Return whether @port_id is stopped; bonding slave ports are treated
 * as stopped regardless of their reported state.
 */
2032 port_is_stopped(portid_t port_id)
2034 struct rte_port *port = &ports[port_id];
2036 if ((port->port_status != RTE_PORT_STOPPED) &&
2037 (port->slave_flag == 0))
/* Return non-zero when every ethdev port satisfies port_is_stopped(). */
2043 all_ports_stopped(void)
2047 RTE_ETH_FOREACH_DEV(pi) {
2048 if (!port_is_stopped(pi))
/* Return whether @port_id is valid and currently in RTE_PORT_STARTED. */
2056 port_is_started(portid_t port_id)
2058 if (port_id_is_invalid(port_id, ENABLED_WARN))
2061 if (ports[port_id].port_status != RTE_PORT_STARTED)
2067 /* Configure the Rx and Tx hairpin queues for the selected port. */
/*
 * Hairpin queues occupy the indexes just above the regular queues:
 * TX hairpin queues [nb_txq, nb_txq + nb_hairpinq) are peered with RX
 * queue index i + nb_rxq on the same port, and vice versa for RX.
 * On any setup failure the port is moved back to RTE_PORT_STOPPED and
 * flagged for queue reconfiguration on the next start attempt.
 */
2069 setup_hairpin_queues(portid_t pi)
2072 struct rte_eth_hairpin_conf hairpin_conf = {
2077 struct rte_port *port = &ports[pi];
2079 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2080 hairpin_conf.peers[0].port = pi;
2081 hairpin_conf.peers[0].queue = i + nb_rxq;
2082 diag = rte_eth_tx_hairpin_queue_setup
2083 (pi, qi, nb_txd, &hairpin_conf);
2088 /* Fail to setup rx queue, return */
2089 if (rte_atomic16_cmpset(&(port->port_status),
2091 RTE_PORT_STOPPED) == 0)
2092 printf("Port %d can not be set back "
2093 "to stopped\n", pi);
2094 printf("Fail to configure port %d hairpin "
2096 /* try to reconfigure queues next time */
2097 port->need_reconfig_queues = 1;
2100 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2101 hairpin_conf.peers[0].port = pi;
2102 hairpin_conf.peers[0].queue = i + nb_txq;
2103 diag = rte_eth_rx_hairpin_queue_setup
2104 (pi, qi, nb_rxd, &hairpin_conf);
2109 /* Fail to setup rx queue, return */
2110 if (rte_atomic16_cmpset(&(port->port_status),
2112 RTE_PORT_STOPPED) == 0)
2113 printf("Port %d can not be set back "
2114 "to stopped\n", pi);
2115 printf("Fail to configure port %d hairpin "
2117 /* try to reconfigure queues next time */
2118 port->need_reconfig_queues = 1;
/*
 * Start port @pid, or every port when pid == RTE_PORT_ALL.
 * Per port: transition STOPPED -> HANDLING via atomic cmpset, redo the
 * device configuration when need_reconfig is set, redo queue setup when
 * need_reconfig_queues is set (TX, RX, then hairpin queues), start the
 * device, and transition HANDLING -> STARTED. Any failure rolls the
 * port status back to STOPPED and re-arms the matching reconfig flag so
 * the next start attempt retries the step.
 * Finally triggers a link-status check if at least one port started.
 * NOTE(review): many error-path lines (returns, else branches, braces)
 * are elided in this view.
 */
2125 start_port(portid_t pid)
2127 int diag, need_check_link_status = -1;
2130 struct rte_port *port;
2131 struct rte_ether_addr mac_addr;
2132 struct rte_eth_hairpin_cap cap;
2134 if (port_id_is_invalid(pid, ENABLED_WARN))
2139 RTE_ETH_FOREACH_DEV(pi) {
2140 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2143 need_check_link_status = 0;
/* Claim the port: only a STOPPED port may be started. */
2145 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2146 RTE_PORT_HANDLING) == 0) {
2147 printf("Port %d is now not stopped\n", pi);
2151 if (port->need_reconfig > 0) {
2152 port->need_reconfig = 0;
2154 if (flow_isolate_all) {
2155 int ret = port_flow_isolate(pi, 1);
2157 printf("Failed to apply isolated"
2158 " mode on port %d\n", pi);
2162 configure_rxtx_dump_callbacks(0);
2163 printf("Configuring Port %d (socket %u)\n", pi,
2165 if (nb_hairpinq > 0 &&
2166 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2167 printf("Port %d doesn't support hairpin "
2171 /* configure port */
2172 diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
2173 nb_txq + nb_hairpinq,
2176 if (rte_atomic16_cmpset(&(port->port_status),
2177 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2178 printf("Port %d can not be set back "
2179 "to stopped\n", pi);
2180 printf("Fail to configure port %d\n", pi);
2181 /* try to reconfigure port next time */
2182 port->need_reconfig = 1;
2186 if (port->need_reconfig_queues > 0) {
2187 port->need_reconfig_queues = 0;
2188 /* setup tx queues */
2189 for (qi = 0; qi < nb_txq; qi++) {
/* Honor a per-port TX ring NUMA override when present. */
2190 if ((numa_support) &&
2191 (txring_numa[pi] != NUMA_NO_CONFIG))
2192 diag = rte_eth_tx_queue_setup(pi, qi,
2193 port->nb_tx_desc[qi],
2195 &(port->tx_conf[qi]));
2197 diag = rte_eth_tx_queue_setup(pi, qi,
2198 port->nb_tx_desc[qi],
2200 &(port->tx_conf[qi]));
2205 /* Fail to setup tx queue, return */
2206 if (rte_atomic16_cmpset(&(port->port_status),
2208 RTE_PORT_STOPPED) == 0)
2209 printf("Port %d can not be set back "
2210 "to stopped\n", pi);
2211 printf("Fail to configure port %d tx queues\n",
2213 /* try to reconfigure queues next time */
2214 port->need_reconfig_queues = 1;
2217 for (qi = 0; qi < nb_rxq; qi++) {
2218 /* setup rx queues */
/* RX queues additionally need a mempool on the chosen socket. */
2219 if ((numa_support) &&
2220 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2221 struct rte_mempool * mp =
2222 mbuf_pool_find(rxring_numa[pi]);
2224 printf("Failed to setup RX queue:"
2225 "No mempool allocation"
2226 " on the socket %d\n",
2231 diag = rte_eth_rx_queue_setup(pi, qi,
2232 port->nb_rx_desc[qi],
2234 &(port->rx_conf[qi]),
2237 struct rte_mempool *mp =
2238 mbuf_pool_find(port->socket_id);
2240 printf("Failed to setup RX queue:"
2241 "No mempool allocation"
2242 " on the socket %d\n",
2246 diag = rte_eth_rx_queue_setup(pi, qi,
2247 port->nb_rx_desc[qi],
2249 &(port->rx_conf[qi]),
2255 /* Fail to setup rx queue, return */
2256 if (rte_atomic16_cmpset(&(port->port_status),
2258 RTE_PORT_STOPPED) == 0)
2259 printf("Port %d can not be set back "
2260 "to stopped\n", pi);
2261 printf("Fail to configure port %d rx queues\n",
2263 /* try to reconfigure queues next time */
2264 port->need_reconfig_queues = 1;
2267 /* setup hairpin queues */
2268 if (setup_hairpin_queues(pi) != 0)
2271 configure_rxtx_dump_callbacks(verbose_level);
2273 if (rte_eth_dev_start(pi) < 0) {
2274 printf("Fail to start port %d\n", pi);
2276 /* Fail to setup rx queue, return */
2277 if (rte_atomic16_cmpset(&(port->port_status),
2278 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2279 printf("Port %d can not be set back to "
2284 if (rte_atomic16_cmpset(&(port->port_status),
2285 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2286 printf("Port %d can not be set into started\n", pi);
2288 if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2289 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2290 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2291 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2292 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2294 /* at least one port started, need checking link status */
2295 need_check_link_status = 1;
2298 if (need_check_link_status == 1 && !no_link_check)
2299 check_all_ports_link_status(RTE_PORT_ALL);
2300 else if (need_check_link_status == 0)
2301 printf("Please stop the ports first\n");
/*
 * Stop port @pid, or every port when pid == RTE_PORT_ALL.
 * Refuses ports that are still forwarding or are bonding slaves.
 * State machine: STARTED -> HANDLING (atomic), rte_eth_dev_stop(),
 * then HANDLING -> STOPPED. Triggers a link-status check afterwards
 * unless --disable-link-check is in effect.
 */
2308 stop_port(portid_t pid)
2311 struct rte_port *port;
2312 int need_check_link_status = 0;
2319 if (port_id_is_invalid(pid, ENABLED_WARN))
2322 printf("Stopping ports...\n");
2324 RTE_ETH_FOREACH_DEV(pi) {
2325 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2328 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2329 printf("Please remove port %d from forwarding configuration.\n", pi);
2333 if (port_is_bonding_slave(pi)) {
2334 printf("Please remove port %d from bonded device.\n", pi);
2339 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2340 RTE_PORT_HANDLING) == 0)
2343 rte_eth_dev_stop(pi);
2345 if (rte_atomic16_cmpset(&(port->port_status),
2346 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2347 printf("Port %d can not be set into stopped\n", pi);
2348 need_check_link_status = 1;
2350 if (need_check_link_status && !no_link_check)
2351 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Compact @array in place, dropping entries whose port id is no longer
 * valid, and update @total to the new element count.
 */
2357 remove_invalid_ports_in(portid_t *array, portid_t *total)
2360 portid_t new_total = 0;
2362 for (i = 0; i < *total; i++)
2363 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2364 array[new_total] = array[i];
/*
 * Purge detached/invalid port ids from both global port lists and keep
 * nb_cfg_ports in sync with the forwarding port count.
 */
2371 remove_invalid_ports(void)
2373 remove_invalid_ports_in(ports_ids, &nb_ports);
2374 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2375 nb_cfg_ports = nb_fwd_ports;
/*
 * Close port @pid, or every port when pid == RTE_PORT_ALL.
 * Skips ports that are still forwarding, are bonding slaves, or are
 * already closed. For a STOPPED port: flush its flow rules, call
 * rte_eth_dev_close(), purge invalid ids from the port lists, then mark
 * the port CLOSED.
 */
2379 close_port(portid_t pid)
2382 struct rte_port *port;
2384 if (port_id_is_invalid(pid, ENABLED_WARN))
2387 printf("Closing ports...\n");
2389 RTE_ETH_FOREACH_DEV(pi) {
2390 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2393 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2394 printf("Please remove port %d from forwarding configuration.\n", pi);
2398 if (port_is_bonding_slave(pi)) {
2399 printf("Please remove port %d from bonded device.\n", pi);
/* CLOSED -> CLOSED cmpset is a pure already-closed test. */
2404 if (rte_atomic16_cmpset(&(port->port_status),
2405 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2406 printf("Port %d is already closed\n", pi);
2410 if (rte_atomic16_cmpset(&(port->port_status),
2411 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2412 printf("Port %d is now not stopped\n", pi);
2416 if (port->flow_list)
2417 port_flow_flush(pi);
2418 rte_eth_dev_close(pi);
2420 remove_invalid_ports();
2422 if (rte_atomic16_cmpset(&(port->port_status),
2423 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2424 printf("Port %d cannot be set to closed\n", pi);
/*
 * Reset port @pid, or every port when pid == RTE_PORT_ALL.
 * All targeted ports must already be stopped; forwarding ports and
 * bonding slaves are refused. A successful rte_eth_dev_reset() flags
 * the port for full reconfiguration on the next start.
 */
2431 reset_port(portid_t pid)
2435 struct rte_port *port;
2437 if (port_id_is_invalid(pid, ENABLED_WARN))
2440 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2441 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2442 printf("Can not reset port(s), please stop port(s) first.\n");
2446 printf("Resetting ports...\n");
2448 RTE_ETH_FOREACH_DEV(pi) {
2449 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2452 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2453 printf("Please remove port %d from forwarding "
2454 "configuration.\n", pi);
2458 if (port_is_bonding_slave(pi)) {
2459 printf("Please remove port %d from bonded device.\n",
2464 diag = rte_eth_dev_reset(pi);
2467 port->need_reconfig = 1;
2468 port->need_reconfig_queues = 1;
2470 printf("Failed to reset port %d. diag=%d\n", pi, diag);
/*
 * Hot-plug attach: probe the device described by @identifier (devargs
 * string), then set up the resulting port(s). Two discovery paths:
 * with setup_on_probe_event, ports flagged by the RTE_ETH_EVENT_NEW
 * handler are set up; otherwise the devargs iterator is used, skipping
 * ports that were already attached (already forwarding).
 */
2478 attach_port(char *identifier)
2481 struct rte_dev_iterator iterator;
2483 printf("Attaching a new port...\n");
2485 if (identifier == NULL) {
2486 printf("Invalid parameters are specified\n");
2490 if (rte_dev_probe(identifier) < 0) {
2491 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2495 /* first attach mode: event */
2496 if (setup_on_probe_event) {
2497 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2498 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2499 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2500 ports[pi].need_setup != 0)
2501 setup_attached_port(pi);
2505 /* second attach mode: iterator */
2506 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2507 /* setup ports matching the devargs used for probing */
2508 if (port_is_forwarding(pi))
2509 continue; /* port was already attached before */
2510 setup_attached_port(pi);
/*
 * Finish initialization of a newly attached port @pi: resolve its NUMA
 * socket (fall back to the first known socket when invalid), run
 * reconfig(), enable promiscuous mode (best effort), register the port
 * in the global/forwarding port lists, and mark it STOPPED.
 */
2515 setup_attached_port(portid_t pi)
2517 unsigned int socket_id;
2520 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2521 /* if socket_id is invalid, set to the first available socket. */
2522 if (check_socket_id(socket_id) < 0)
2523 socket_id = socket_ids[0];
2524 reconfig(pi, socket_id);
2525 ret = rte_eth_promiscuous_enable(pi);
2527 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
2528 pi, rte_strerror(-ret));
2530 ports_ids[nb_ports++] = pi;
2531 fwd_ports_ids[nb_fwd_ports++] = pi;
2532 nb_cfg_ports = nb_fwd_ports;
2533 ports[pi].need_setup = 0;
2534 ports[pi].port_status = RTE_PORT_STOPPED;
2536 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Hot-plug detach by port id: remove the underlying rte_device of
 * @port_id. The port must be stopped (closed preferred; an unclosed
 * port gets its flow rules flushed first). After rte_dev_remove(),
 * every sibling port of the same device is forcibly marked CLOSED and
 * the global port lists are purged.
 */
2541 detach_port_device(portid_t port_id)
2543 struct rte_device *dev;
2546 printf("Removing a device...\n");
2548 dev = rte_eth_devices[port_id].device;
2550 printf("Device already removed\n");
2554 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2555 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2556 printf("Port not stopped\n");
2559 printf("Port was not closed\n");
2560 if (ports[port_id].flow_list)
2561 port_flow_flush(port_id);
2564 if (rte_dev_remove(dev) < 0) {
2565 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2568 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2569 /* reset mapping between old ports and removed device */
2570 rte_eth_devices[sibling].device = NULL;
2571 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2572 /* sibling ports are forced to be closed */
2573 ports[sibling].port_status = RTE_PORT_CLOSED;
2574 printf("Port %u is closed\n", sibling);
2578 remove_invalid_ports();
2580 printf("Device of port %u is detached\n", port_id);
2581 printf("Now total ports is %d\n", nb_ports);
/*
 * Hot-plug detach by devargs string: parse @identifier, close every
 * matching port that is stopped (flushing its flow rules), then remove
 * the device from its bus via rte_eal_hotplug_remove() and purge the
 * global port lists.
 */
2587 detach_device(char *identifier)
2589 struct rte_dev_iterator iterator;
2590 struct rte_devargs da;
2593 printf("Removing a device...\n");
2595 memset(&da, 0, sizeof(da));
2596 if (rte_devargs_parsef(&da, "%s", identifier)) {
2597 printf("cannot parse identifier\n");
2603 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
2604 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2605 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2606 printf("Port %u not stopped\n", port_id);
2610 /* sibling ports are forced to be closed */
2611 if (ports[port_id].flow_list)
2612 port_flow_flush(port_id);
2613 ports[port_id].port_status = RTE_PORT_CLOSED;
2614 printf("Port %u is now closed\n", port_id);
2618 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
2619 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
2620 da.name, da.bus->name);
2624 remove_invalid_ports();
2626 printf("Device %s is detached\n", identifier);
2627 printf("Now total ports is %d\n", nb_ports);
/*
 * NOTE(review): function header is elided in this view — this appears
 * to be the body of the testpmd exit/teardown routine (presumably
 * pmd_test_exit); confirm against the full source.
 * Teardown order: stop forwarding, unmap anonymous mempool memory,
 * stop and shut down every port, stop the device-event monitor and
 * unregister its callback, disable hotplug handling, free mempools.
 */
2639 stop_packet_forwarding();
2641 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2643 if (mp_alloc_type == MP_ALLOC_ANON)
2644 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
2648 if (ports != NULL) {
2650 RTE_ETH_FOREACH_DEV(pt_id) {
2651 printf("\nStopping port %d...\n", pt_id);
2655 RTE_ETH_FOREACH_DEV(pt_id) {
2656 printf("\nShutting down port %d...\n", pt_id);
2663 ret = rte_dev_event_monitor_stop();
2666 "fail to stop device event monitor.");
2670 ret = rte_dev_event_callback_unregister(NULL,
2671 dev_event_callback, NULL);
2674 "fail to unregister device event callback.\n");
2678 ret = rte_dev_hotplug_handle_disable();
2681 "fail to disable hotplug handling.\n");
2685 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2687 rte_mempool_free(mempools[i]);
2690 printf("\nBye...\n");
/* A named test command: menu entry name and its handler. */
2693 typedef void (*cmd_func_t)(void);
2694 struct pmd_test_command {
2695 const char *cmd_name;
2696 cmd_func_t cmd_func;
/* Number of entries in pmd_test_menu (defined elsewhere in the file). */
2699 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2701 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * Poll rte_eth_link_get_nowait() on every port in @port_mask every
 * 100 ms, up to 9 s. Statuses are printed only on the final pass
 * (all links up, or timeout) to avoid noisy intermediate output.
 */
2703 check_all_ports_link_status(uint32_t port_mask)
2705 #define CHECK_INTERVAL 100 /* 100ms */
2706 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2708 uint8_t count, all_ports_up, print_flag = 0;
2709 struct rte_eth_link link;
2712 printf("Checking link statuses...\n");
2714 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2716 RTE_ETH_FOREACH_DEV(portid) {
2717 if ((port_mask & (1 << portid)) == 0)
2719 memset(&link, 0, sizeof(link));
2720 ret = rte_eth_link_get_nowait(portid, &link);
2723 if (print_flag == 1)
2724 printf("Port %u link get failed: %s\n",
2725 portid, rte_strerror(-ret));
2728 /* print link status if flag set */
2729 if (print_flag == 1) {
2730 if (link.link_status)
2732 "Port%d Link Up. speed %u Mbps- %s\n",
2733 portid, link.link_speed,
2734 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2735 ("full-duplex") : ("half-duplex\n"));
2737 printf("Port %d Link Down\n", portid);
2740 /* clear all_ports_up flag if any link down */
2741 if (link.link_status == ETH_LINK_DOWN) {
2746 /* after finally printing all link status, get out */
2747 if (print_flag == 1)
2750 if (all_ports_up == 0) {
2752 rte_delay_ms(CHECK_INTERVAL);
2755 /* set the print_flag if all ports up or timeout */
2756 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2766 * This callback is for remove a port for a device. It has limitation because
2767 * it is not for multiple port removal for a device.
2768 * TODO: the device detach invoke will plan to be removed from user side to
2769 * eal. And convert all PMDs to free port resources on ether device closing.
/*
 * Alarm callback (scheduled by the INTR_RMV / device-remove handlers):
 * pause forwarding if the removed port was part of it, close and detach
 * the port with link checks suppressed, then resume forwarding if it
 * had been stopped here. @arg carries the port id as an intptr_t.
 */
2772 rmv_port_callback(void *arg)
2774 int need_to_start = 0;
2775 int org_no_link_check = no_link_check;
2776 portid_t port_id = (intptr_t)arg;
2778 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2780 if (!test_done && port_is_forwarding(port_id)) {
2782 stop_packet_forwarding();
2786 no_link_check = org_no_link_check;
2787 close_port(port_id);
2788 detach_port_device(port_id);
2790 start_packet_forwarding(0);
2793 /* This function is used by the interrupt thread */
/*
 * Ethdev event callback registered for all ports: optionally print the
 * event (per event_print_mask), flag new ports for deferred setup on
 * RTE_ETH_EVENT_NEW, and on RTE_ETH_EVENT_INTR_RMV schedule
 * rmv_port_callback() via a 100 ms alarm — detaching cannot be done
 * directly inside the interrupt context.
 */
2795 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2798 RTE_SET_USED(param);
2799 RTE_SET_USED(ret_param);
2801 if (type >= RTE_ETH_EVENT_MAX) {
2802 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2803 port_id, __func__, type);
2805 } else if (event_print_mask & (UINT32_C(1) << type)) {
2806 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2807 eth_event_desc[type]);
2812 case RTE_ETH_EVENT_NEW:
2813 ports[port_id].need_setup = 1;
2814 ports[port_id].port_status = RTE_PORT_HANDLING;
2816 case RTE_ETH_EVENT_INTR_RMV:
2817 if (port_id_is_invalid(port_id, DISABLED_WARN))
2819 if (rte_eal_alarm_set(100000,
2820 rmv_port_callback, (void *)(intptr_t)port_id))
2821 fprintf(stderr, "Could not set up deferred device removal\n");
/*
 * Register eth_event_callback for every ethdev event type on all ports
 * (RTE_ETH_ALL).  Logs and reports failure per event type; error-return
 * handling between the numbered lines is elided in this view.
 */
2830 register_eth_event_callback(void)
2833 enum rte_eth_event_type event;
/* walk every known event type and hook the shared callback */
2835 for (event = RTE_ETH_EVENT_UNKNOWN;
2836 event < RTE_ETH_EVENT_MAX; event++) {
2837 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
2842 TESTPMD_LOG(ERR, "Failed to register callback for "
2843 "%s event\n", eth_event_desc[event]);
2851 /* This function is used by the interrupt thread */
/*
 * Device (hotplug) event callback, invoked in the EAL interrupt thread.
 * On REMOVE: resolves the port from the device name and schedules a
 * deferred detach.  On ADD: only logs — kernel driver binding must finish
 * before the port can be attached.
 * NOTE(review): elided extract — switch framing and some statements
 * between the numbered lines are not visible here.
 */
2853 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2854 __rte_unused void *arg)
/* reject out-of-range event ids before acting on them */
2859 if (type >= RTE_DEV_EVENT_MAX) {
2860 fprintf(stderr, "%s called upon invalid event %d\n",
2866 case RTE_DEV_EVENT_REMOVE:
2867 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
2869 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2871 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
/*
 * This user callback runs inside the EAL interrupt callback, which must
 * have finished before it can be unregistered during device detach.  So
 * return quickly here and perform the detach through a deferred removal
 * instead.  This is a workaround: once device detaching moves into the
 * EAL, the deferred removal can be dropped.
 */
2884 if (rte_eal_alarm_set(100000,
2885 rmv_port_callback, (void *)(intptr_t)port_id))
2887 "Could not set up deferred device removal\n");
2889 case RTE_DEV_EVENT_ADD:
/* BUGFIX: informational "added" message was logged at ERR severity */
2890 RTE_LOG(DEBUG, EAL, "The device: %s has been added!\n",
2892 /* TODO: after kernel driver binding finishes,
2893 * begin to attach the port.
/*
 * Program the command-line-configured TX queue -> stats-counter mappings
 * into the NIC for the given port; sets
 * port->tx_queue_stats_mapping_enabled when at least one mapping applied.
 * Error/return handling between the numbered lines is elided in this view.
 */
2902 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2906 uint8_t mapping_found = 0;
/* apply every mapping that targets this port and a valid TX queue */
2908 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2909 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2910 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2911 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2912 tx_queue_stats_mappings[i].queue_id,
2913 tx_queue_stats_mappings[i].stats_counter_id);
2920 port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX-side counterpart of set_tx_queue_stats_mapping_registers(): program
 * the configured RX queue -> stats-counter mappings into the NIC and set
 * port->rx_queue_stats_mapping_enabled when at least one mapping applied.
 * Error/return handling between the numbered lines is elided in this view.
 */
2925 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2929 uint8_t mapping_found = 0;
/* apply every mapping that targets this port and a valid RX queue */
2931 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2932 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2933 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2934 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2935 rx_queue_stats_mappings[i].queue_id,
2936 rx_queue_stats_mappings[i].stats_counter_id);
2943 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue-stats mappings for one port.  -ENOTSUP from
 * the PMD is tolerated (feature disabled and reported on stdout); any
 * other failure aborts testpmd via rte_exit().
 */
2948 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2952 diag = set_tx_queue_stats_mapping_registers(pi, port);
2954 if (diag == -ENOTSUP) {
2955 port->tx_queue_stats_mapping_enabled = 0;
2956 printf("TX queue stats mapping not supported port id=%d\n", pi);
2959 rte_exit(EXIT_FAILURE,
2960 "set_tx_queue_stats_mapping_registers "
2961 "failed for port id=%d diag=%d\n",
/* same procedure for the RX direction */
2965 diag = set_rx_queue_stats_mapping_registers(pi, port);
2967 if (diag == -ENOTSUP) {
2968 port->rx_queue_stats_mapping_enabled = 0;
2969 printf("RX queue stats mapping not supported port id=%d\n", pi);
2972 rte_exit(EXIT_FAILURE,
2973 "set_rx_queue_stats_mapping_registers "
2974 "failed for port id=%d diag=%d\n",
/*
 * Reset the per-queue RX/TX configuration of a port to the PMD defaults
 * (preserving already-selected offloads), then overlay any thresholds the
 * user supplied on the command line; RTE_PMD_PARAM_UNSET means "keep the
 * PMD default".  Also records nb_rxd/nb_txd as per-queue descriptor counts.
 */
2980 rxtx_port_config(struct rte_port *port)
2985 for (qid = 0; qid < nb_rxq; qid++) {
/* keep the chosen offloads across the reset to the PMD default rxconf */
2986 offloads = port->rx_conf[qid].offloads;
2987 port->rx_conf[qid] = port->dev_info.default_rxconf;
2989 port->rx_conf[qid].offloads = offloads;
2991 /* Check if any Rx parameters have been passed */
2992 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2993 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2995 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2996 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2998 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2999 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3001 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3002 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3004 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3005 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3007 port->nb_rx_desc[qid] = nb_rxd;
/* same treatment for the TX side */
3010 for (qid = 0; qid < nb_txq; qid++) {
3011 offloads = port->tx_conf[qid].offloads;
3012 port->tx_conf[qid] = port->dev_info.default_txconf;
3014 port->tx_conf[qid].offloads = offloads;
3016 /* Check if any Tx parameters have been passed */
3017 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3018 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3020 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3021 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3023 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3024 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3026 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3027 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3029 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3030 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3032 port->nb_tx_desc[qid] = nb_txd;
/*
 * Default (non-DCB) configuration of every probed port: fdir config, RSS
 * hash functions (masked by the PMD's flow_type_rss_offloads), multi-queue
 * RX mode, per-queue RX/TX config, MAC address, queue-stats mappings, and
 * LSC/RMV interrupt enables.
 * NOTE(review): elided extract — conditions between the numbered lines are
 * not visible here.
 */
3037 init_port_config(void)
3040 struct rte_port *port;
3043 RTE_ETH_FOREACH_DEV(pid) {
3045 port->dev_conf.fdir_conf = fdir_conf;
3047 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
/* only advertise hash types the device actually supports */
3052 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3053 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3054 rss_hf & port->dev_info.flow_type_rss_offloads;
3056 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3057 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* pick RSS multi-queue RX only when some hash function is enabled */
3060 if (port->dcb_flag == 0) {
3061 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3062 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
3064 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3067 rxtx_port_config(port);
3069 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3073 map_port_queue_stats_mapping_registers(pid, port);
3074 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3075 rte_pmd_ixgbe_bypass_init(pid);
/* enable LSC/RMV interrupts only when the device advertises support */
3078 if (lsc_interrupt &&
3079 (rte_eth_devices[pid].data->dev_flags &
3080 RTE_ETH_DEV_INTR_LSC))
3081 port->dev_conf.intr_conf.lsc = 1;
3082 if (rmv_interrupt &&
3083 (rte_eth_devices[pid].data->dev_flags &
3084 RTE_ETH_DEV_INTR_RMV))
3085 port->dev_conf.intr_conf.rmv = 1;
3089 void set_port_slave_flag(portid_t slave_pid)
3091 struct rte_port *port;
3093 port = &ports[slave_pid];
3094 port->slave_flag = 1;
3097 void clear_port_slave_flag(portid_t slave_pid)
3099 struct rte_port *port;
3101 port = &ports[slave_pid];
3102 port->slave_flag = 0;
3105 uint8_t port_is_bonding_slave(portid_t slave_pid)
3107 struct rte_port *port;
3109 port = &ports[slave_pid];
3110 if ((rte_eth_devices[slave_pid].data->dev_flags &
3111 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
/* VLAN IDs 0..31 used by get_eth_dcb_conf() to populate the VMDQ+DCB
 * pool map: entry i is steered to pool (i % nb_queue_pools). */
3116 const uint16_t vlan_tags[] = {
3117 0, 1, 2, 3, 4, 5, 6, 7,
3118 8, 9, 10, 11, 12, 13, 14, 15,
3119 16, 17, 18, 19, 20, 21, 22, 23,
3120 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Build the DCB-mode rte_eth_conf for port pid.
 * DCB_VT_ENABLED: VMDQ+DCB with 16 or 32 pools (by num_tcs), one VLAN tag
 * per pool from vlan_tags[], TCs assigned cyclically.  Otherwise: plain
 * DCB, with RSS reusing the port's current hash configuration.
 * NOTE(review): elided extract — some statements between the numbered
 * lines are not visible here.
 */
3124 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3125 enum dcb_mode_enable dcb_mode,
3126 enum rte_eth_nb_tcs num_tcs,
3131 struct rte_eth_rss_conf rss_conf;
3134 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3135 * given above, and the number of traffic classes available for use.
3137 if (dcb_mode == DCB_VT_ENABLED) {
/* BUGFIX: "&eth_conf" had been mangled into the mojibake "ð_conf"
 * (HTML entity "&eth;") in the four pointer initializers below. */
3138 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3139 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3140 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3141 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3143 /* VMDQ+DCB RX and TX configurations */
3144 vmdq_rx_conf->enable_default_pool = 0;
3145 vmdq_rx_conf->default_pool = 0;
3146 vmdq_rx_conf->nb_queue_pools =
3147 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3148 vmdq_tx_conf->nb_queue_pools =
3149 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* one pool-map entry per pool: VLAN i steered to pool (i % pools) */
3151 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3152 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3153 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3154 vmdq_rx_conf->pool_map[i].pools =
3155 1 << (i % vmdq_rx_conf->nb_queue_pools);
3157 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3158 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3159 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3162 /* set DCB mode of RX and TX of multiple queues */
3163 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
3164 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3166 struct rte_eth_dcb_rx_conf *rx_conf =
3167 &eth_conf->rx_adv_conf.dcb_rx_conf;
3168 struct rte_eth_dcb_tx_conf *tx_conf =
3169 &eth_conf->tx_adv_conf.dcb_tx_conf;
/* reuse the port's current RSS configuration for DCB+RSS */
3171 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3175 rx_conf->nb_tcs = num_tcs;
3176 tx_conf->nb_tcs = num_tcs;
3178 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3179 rx_conf->dcb_tc[i] = i % num_tcs;
3180 tx_conf->dcb_tc[i] = i % num_tcs;
3183 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
3184 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3185 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
/* priority-group support always on; PFC only when pfc_en requests it */
3189 eth_conf->dcb_capability_en =
3190 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3192 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * Re-configure port pid for DCB operation: build the DCB rte_eth_conf via
 * get_eth_dcb_conf(), reconfigure the device, adjust the global
 * nb_rxq/nb_txq to the DCB queue layout, enable VLAN filtering (and
 * install the vlan_tags[] filters), then flag the port as DCB-enabled.
 * NOTE(review): elided extract — some statements between the numbered
 * lines are not visible here.
 */
3198 init_port_dcb_config(portid_t pid,
3199 enum dcb_mode_enable dcb_mode,
3200 enum rte_eth_nb_tcs num_tcs,
3203 struct rte_eth_conf port_conf;
3204 struct rte_port *rte_port;
3208 rte_port = &ports[pid];
3210 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3211 /* Enter DCB configuration status */
/* start from the port's current RX/TX mode settings */
3214 port_conf.rxmode = rte_port->dev_conf.rxmode;
3215 port_conf.txmode = rte_port->dev_conf.txmode;
3217 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
3218 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3221 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3223 /* re-configure the device . */
/* BUGFIX: the TX queue count was passed as nb_rxq; it must be nb_txq */
3224 retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
3228 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3232 /* If dev_info.vmdq_pool_base is greater than 0,
3233 * the queue id of vmdq pools is started after pf queues.
3235 if (dcb_mode == DCB_VT_ENABLED &&
3236 rte_port->dev_info.vmdq_pool_base > 0) {
3237 printf("VMDQ_DCB multi-queue mode is nonsensical"
3238 " for port %d.", pid);
3242 /* Assume the ports in testpmd have the same dcb capability
3243 * and has the same number of rxq and txq in dcb mode
3245 if (dcb_mode == DCB_VT_ENABLED) {
3246 if (rte_port->dev_info.max_vfs > 0) {
3247 nb_rxq = rte_port->dev_info.nb_rx_queues;
3248 nb_txq = rte_port->dev_info.nb_tx_queues;
3250 nb_rxq = rte_port->dev_info.max_rx_queues;
3251 nb_txq = rte_port->dev_info.max_tx_queues;
3254 /*if vt is disabled, use all pf queues */
3255 if (rte_port->dev_info.vmdq_pool_base == 0) {
3256 nb_rxq = rte_port->dev_info.max_rx_queues;
3257 nb_txq = rte_port->dev_info.max_tx_queues;
3259 nb_rxq = (queueid_t)num_tcs;
3260 nb_txq = (queueid_t)num_tcs;
3264 rx_free_thresh = 64;
3266 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3268 rxtx_port_config(rte_port);
/* enable HW VLAN filtering and install the DCB test VLAN IDs */
3270 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3271 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3272 rx_vft_set(pid, vlan_tags[i], 1);
3274 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3278 map_port_queue_stats_mapping_registers(pid, rte_port);
3280 rte_port->dcb_flag = 1;
/* NOTE(review): body of a port-initialization routine — its signature is
 * elided above this extract.  Allocates the global ports[] table and
 * resets the per-port NUMA configuration arrays. */
3288 /* Configuration of Ethernet ports. */
3289 ports = rte_zmalloc("testpmd: ports",
3290 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3291 RTE_CACHE_LINE_SIZE);
/* allocation failure is fatal: testpmd cannot run without port state */
3292 if (ports == NULL) {
3293 rte_exit(EXIT_FAILURE,
3294 "rte_zmalloc(%d struct rte_port) failed\n",
3298 /* Initialize ports NUMA structures */
3299 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3300 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3301 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
/* NOTE(review): body of a stats-display routine — its signature is elided
 * above this extract.  Clears the terminal and prints NIC statistics for
 * every forwarding port. */
/* ANSI escapes: ESC[2J clears the screen, ESC[1;1H homes the cursor */
3315 const char clr[] = { 27, '[', '2', 'J', '\0' };
3316 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3318 /* Clear screen and move to top left */
3319 printf("%s%s", clr, top_left);
3321 printf("\nPort statistics ====================================");
3322 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3323 nic_stats_display(fwd_ports_ids[i]);
/*
 * SIGINT/SIGTERM handler: uninitializes the packet-capture framework and
 * latency stats (when enabled), flags forced termination, then re-raises
 * the signal with the default handler restored so the process exits with
 * the conventional signal status.
 */
3329 signal_handler(int signum)
3331 if (signum == SIGINT || signum == SIGTERM) {
3332 printf("\nSignal %d received, preparing to exit...\n",
3334 #ifdef RTE_LIBRTE_PDUMP
3335 /* uninitialize packet capture framework */
3338 #ifdef RTE_LIBRTE_LATENCY_STATS
3339 if (latencystats_enabled != 0)
3340 rte_latencystats_uninit();
3343 /* Set flag to indicate the force termination. */
3345 /* exit with the expected status */
3346 signal(signum, SIG_DFL);
3347 kill(getpid(), signum);
3352 main(int argc, char** argv)
3359 signal(SIGINT, signal_handler);
3360 signal(SIGTERM, signal_handler);
3362 testpmd_logtype = rte_log_register("testpmd");
3363 if (testpmd_logtype < 0)
3364 rte_exit(EXIT_FAILURE, "Cannot register log type");
3365 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3367 diag = rte_eal_init(argc, argv);
3369 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3370 rte_strerror(rte_errno));
3372 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3373 rte_exit(EXIT_FAILURE,
3374 "Secondary process type not supported.\n");
3376 ret = register_eth_event_callback();
3378 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3380 #ifdef RTE_LIBRTE_PDUMP
3381 /* initialize packet capture framework */
3386 RTE_ETH_FOREACH_DEV(port_id) {
3387 ports_ids[count] = port_id;
3390 nb_ports = (portid_t) count;
3392 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3394 /* allocate port structures, and init them */
3397 set_def_fwd_config();
3399 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3400 "Check the core mask argument\n");
3402 /* Bitrate/latency stats disabled by default */
3403 #ifdef RTE_LIBRTE_BITRATE
3404 bitrate_enabled = 0;
3406 #ifdef RTE_LIBRTE_LATENCY_STATS
3407 latencystats_enabled = 0;
3410 /* on FreeBSD, mlockall() is disabled by default */
3411 #ifdef RTE_EXEC_ENV_FREEBSD
3420 launch_args_parse(argc, argv);
3422 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3423 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3427 if (tx_first && interactive)
3428 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3429 "interactive mode.\n");
3431 if (tx_first && lsc_interrupt) {
3432 printf("Warning: lsc_interrupt needs to be off when "
3433 " using tx_first. Disabling.\n");
3437 if (!nb_rxq && !nb_txq)
3438 printf("Warning: Either rx or tx queues should be non-zero\n");
3440 if (nb_rxq > 1 && nb_rxq > nb_txq)
3441 printf("Warning: nb_rxq=%d enables RSS configuration, "
3442 "but nb_txq=%d will prevent to fully test it.\n",
3448 ret = rte_dev_hotplug_handle_enable();
3451 "fail to enable hotplug handling.");
3455 ret = rte_dev_event_monitor_start();
3458 "fail to start device event monitoring.");
3462 ret = rte_dev_event_callback_register(NULL,
3463 dev_event_callback, NULL);
3466 "fail to register device event callback\n");
3471 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3472 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3474 /* set all ports to promiscuous mode by default */
3475 RTE_ETH_FOREACH_DEV(port_id) {
3476 ret = rte_eth_promiscuous_enable(port_id);
3478 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3479 port_id, rte_strerror(-ret));
3482 /* Init metrics library */
3483 rte_metrics_init(rte_socket_id());
3485 #ifdef RTE_LIBRTE_LATENCY_STATS
3486 if (latencystats_enabled != 0) {
3487 int ret = rte_latencystats_init(1, NULL);
3489 printf("Warning: latencystats init()"
3490 " returned error %d\n", ret);
3491 printf("Latencystats running on lcore %d\n",
3492 latencystats_lcore_id);
3496 /* Setup bitrate stats */
3497 #ifdef RTE_LIBRTE_BITRATE
3498 if (bitrate_enabled != 0) {
3499 bitrate_data = rte_stats_bitrate_create();
3500 if (bitrate_data == NULL)
3501 rte_exit(EXIT_FAILURE,
3502 "Could not allocate bitrate data.\n");
3503 rte_stats_bitrate_reg(bitrate_data);
3507 #ifdef RTE_LIBRTE_CMDLINE
3508 if (strlen(cmdline_filename) != 0)
3509 cmdline_read_from_file(cmdline_filename);
3511 if (interactive == 1) {
3513 printf("Start automatic packet forwarding\n");
3514 start_packet_forwarding(0);
3526 printf("No commandline core given, start packet forwarding\n");
3527 start_packet_forwarding(tx_first);
3528 if (stats_period != 0) {
3529 uint64_t prev_time = 0, cur_time, diff_time = 0;
3530 uint64_t timer_period;
3532 /* Convert to number of cycles */
3533 timer_period = stats_period * rte_get_timer_hz();
3535 while (f_quit == 0) {
3536 cur_time = rte_get_timer_cycles();
3537 diff_time += cur_time - prev_time;
3539 if (diff_time >= timer_period) {
3541 /* Reset the timer */
3544 /* Sleep to avoid unnecessary checks */
3545 prev_time = cur_time;
3550 printf("Press enter to exit\n");
3551 rc = read(0, &c, 1);