/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <inttypes.h>
#include <errno.h>
#include <limits.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/mman.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif
#include <rte_gro.h>

#include "testpmd.h"
#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
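/*
 * Each external buffer zone is reserved as a single IOVA-contiguous memzone
 * of this size and carved into cache-line-aligned buffer elements; see
 * setup_extbuf() below.
 */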

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not set.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/* Per-port socket on which the memory pool used by the port is allocated. */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/* Per-port socket on which the RX ring used by the port is allocated. */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/* Per-port socket on which the TX ring used by the port is allocated. */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated from outside. Set a flag to exit the stats-period loop once
 * SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1;  /**< Number of RX queues per port. */
queueid_t nb_txq = 1;  /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
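/*
 * In paired topology, port 0 forwards to port 1 and vice versa, port 2 to
 * port 3, and so on; in chained topology each port forwards to the next
 * enabled port instead.
 */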

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping ports.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
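/*
 * An event is printed when its bit is set in the mask, i.e. when
 * (event_print_mask & (UINT32_C(1) << event_type)) is non-zero.
 */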

/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats is enabled in the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		/**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
};
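/*
 * The all-ones masks above make every bit of the corresponding field
 * significant for flow director matching; tunnel_type_mask is a single-bit
 * field.
 */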

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/*
 * Hexadecimal bitmask of the RX multi-queue modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
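/*
 * With the standard Ethernet maximum frame length of 1518 bytes and a
 * 4-byte CRC, gso_max_segment_size defaults to 1514 bytes.
 */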

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];

/*
 * Helper function to check if socket is already discovered.
 * Returns a positive value if the socket ID is new (not yet discovered),
 * zero if it was already discovered.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
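/*
 * For example (illustrative numbers only): with 2 MB pages and an effective
 * object size of 2176 bytes, one page holds 963 mbufs, so 180224 mbufs need
 * 188 pages (376 MB of mbuf memory) plus the fixed 128 MB header allowance.
 */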

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
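/*
 * For example, for 2 MB pages rte_log2_u64(RTE_PGSIZE_2M) is 21, so the
 * returned flags equal 21 << MAP_HUGE_SHIFT (the MAP_HUGE_2MB encoding).
 */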

static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);
		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}
		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	munmap(addr, mem_sz);

	return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}

static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
					memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
				      memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
}

static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
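
	/*
	 * Ceiling division: e.g. with 2 MB zones and 2176-byte elements one
	 * zone holds 963 buffers, so 16384 mbufs require 18 zones.
	 */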

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}

/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}

/*
 * Check whether the given socket ID is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port ID that has the smallest value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return max_rxq_valid ? allowed_max_rxq : 0;
}

/*
 * Check whether the rxq input is valid: it must not exceed the maximum
 * number of RX queues of any port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq, allowed_max_rxq, pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port ID that has the smallest value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return max_txq_valid ? allowed_max_txq : 0;
}

/*
 * Check whether the txq input is valid: it must not exceed the maximum
 * number of TX queues of any port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq, allowed_max_txq, pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RXDs for every RX queue.
 * *pid returns the port ID that has the smallest value of
 * max_rxd among all ports.
 */
static uint16_t
get_allowed_max_nb_rxd(portid_t *pid)
{
	uint16_t allowed_max_rxd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_rxd;
}

/*
 * Get the allowed minimal number of RXDs for every RX queue.
 * *pid returns the port ID that has the largest value of
 * min_rxd among all ports.
 */
static uint16_t
get_allowed_min_nb_rxd(portid_t *pid)
{
	uint16_t allowed_min_rxd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_rxd;
}

/*
 * Check whether the rxd input is valid: it must not exceed the maximum
 * number of RXDs of any RX queue, nor be smaller than the minimal number
 * of RXDs of any RX queue. Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxd(queueid_t rxd)
{
	uint16_t allowed_max_rxd;
	uint16_t allowed_min_rxd;
	portid_t pid = 0;

	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
	if (rxd > allowed_max_rxd) {
		printf("Fail: input rxd (%u) can't be greater "
		       "than max_rxds (%u) of port %u\n",
		       rxd, allowed_max_rxd, pid);
		return -1;
	}

	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
	if (rxd < allowed_min_rxd) {
		printf("Fail: input rxd (%u) can't be less "
		       "than min_rxds (%u) of port %u\n",
		       rxd, allowed_min_rxd, pid);
		return -1;
	}

	return 0;
}

/*
 * Get the allowed maximum number of TXDs for every TX queue.
 * *pid returns the port ID that has the smallest value of
 * max_txd among all ports.
 */
static uint16_t
get_allowed_max_nb_txd(portid_t *pid)
{
	uint16_t allowed_max_txd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_txd;
}

/*
 * Get the allowed minimal number of TXDs for every TX queue.
 * *pid returns the port ID that has the largest value of
 * min_txd among all ports.
 */
static uint16_t
get_allowed_min_nb_txd(portid_t *pid)
{
	uint16_t allowed_min_txd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_txd;
}

/*
 * Check whether the txd input is valid: it must not exceed the maximum
 * number of TXDs of any TX queue, nor be smaller than the minimal number
 * of TXDs of any TX queue. Return 0 if valid, -1 otherwise.
 */
int
check_nb_txd(queueid_t txd)
{
	uint16_t allowed_max_txd;
	uint16_t allowed_min_txd;
	portid_t pid = 0;

	allowed_max_txd = get_allowed_max_nb_txd(&pid);
	if (txd > allowed_max_txd) {
		printf("Fail: input txd (%u) can't be greater "
		       "than max_txds (%u) of port %u\n",
		       txd, allowed_max_txd, pid);
		return -1;
	}

	allowed_min_txd = get_allowed_min_nb_txd(&pid);
	if (txd < allowed_min_txd) {
		printf("Fail: input txd (%u) can't be less "
		       "than min_txds (%u) of port %u\n",
		       txd, allowed_min_txd, pid);
		return -1;
	}

	return 0;
}

/*
 * Get the allowed maximum number of hairpin queues.
 * *pid returns the port ID that has the smallest value of
 * max_hairpin_queues among all ports.
 */
queueid_t
get_allowed_max_nb_hairpinq(portid_t *pid)
{
	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
	portid_t pi;
	struct rte_eth_hairpin_cap cap;

	RTE_ETH_FOREACH_DEV(pi) {
		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
			*pid = pi;
			return 0;
		}
		if (cap.max_nb_queues < allowed_max_hairpinq) {
			allowed_max_hairpinq = cap.max_nb_queues;
			*pid = pi;
		}
	}
	return allowed_max_hairpinq;
}

/*
 * Check whether the hairpinq input is valid: it must not exceed the maximum
 * number of hairpin queues of any port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_hairpinq(queueid_t hairpinq)
{
	queueid_t allowed_max_hairpinq;
	portid_t pid = 0;

	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
	if (hairpinq > allowed_max_hairpinq) {
		printf("Fail: input hairpin (%u) can't be greater "
		       "than max_hairpin_queues (%u) of port %u\n",
		       hairpinq, allowed_max_hairpinq, pid);
		return -1;
	}
	return 0;
}

static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	uint16_t data_size;
	bool warning = 0;
	int k;
	int ret;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
					"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
						"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_info_get() failed\n");

		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;

		/* Check for maximum number of segments per MTU. Accordingly
		 * update the mbuf data size.
		 */
		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
		    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = rx_mode.max_rx_pkt_len /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;

			if ((data_size + RTE_PKTMBUF_HEADROOM) >
							mbuf_data_size) {
				mbuf_data_size = data_size +
						 RTE_PKTMBUF_HEADROOM;
				warning = 1;
			}
		}
	}

	if (warning)
		TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
			    mbuf_data_size);

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
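
	/*
	 * The default sizing above covers full RX and TX rings, the
	 * per-lcore mempool caches and one burst, scaled by the maximum
	 * number of ports.
	 */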

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mempools[i] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool,
						       socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mempools[0] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool, 0);
		else
			mempools[socket_num] = mbuf_pool_create
						(mbuf_data_size,
						 nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}

void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;
	int ret;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];

	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
	if (ret != 0)
		return;

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}

int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
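	/* e.g. 2 ports with nb_rxq = nb_txq = 4 yield 2 * 4 = 8 streams */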
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	uint64_t total_burst, sburst;
	uint64_t nb_burst;
	uint64_t burst_stats[4];
	uint16_t pktnb_stats[4];
	uint16_t nb_pkt;
	int burst_percent[4], sburstp;
	int i;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];
	pktnb_stats[0] = 0;

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];

		if (nb_burst == 0)
			continue;

		total_burst += nb_burst;

		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		if (i == 3) {
			printf("%d%% of other]\n", 100 - sburstp);
			return;
		}

		sburst += burst_stats[i];
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
			       100 - sburstp, (int) pktnb_stats[i]);
			return;
		}

		burst_percent[i] =
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
		       burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
	}
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
		"TX Port=%2d/Queue=%2d %s\n",
		fwd_top_stats_border, fs->rx_port, fs->rx_queue,
		fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
	       " TX-dropped: %-14"PRIu64,
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
		       "  Rx- bad L4 checksum: %-14"PRIu64
		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);
	} else {
		printf("\n");
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles = 0;
#endif
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles += fs->core_cycles;
#endif
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		uint8_t j;

		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		if (!port->rx_queue_stats_mapping_enabled &&
		    !port->tx_queue_stats_mapping_enabled) {
			printf("  RX-packets: %-14"PRIu64
			       " RX-dropped: %-14"PRIu64
			       "RX-total: %-"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum: %-14"PRIu64
				       " Bad-l4csum: %-14"PRIu64
				       "Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if (stats.ierrors + stats.rx_nombuf > 0) {
				printf("  RX-error: %-"PRIu64"\n",
				       stats.ierrors);
				printf("  RX-nombufs: %-14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %-14"PRIu64
			       " TX-dropped: %-14"PRIu64
			       "TX-total: %-"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		} else {
			printf("  RX-packets: %14"PRIu64
			       " RX-dropped:%14"PRIu64
			       " RX-total:%14"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum:%14"PRIu64
				       " Bad-l4csum:%14"PRIu64
				       " Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if ((stats.ierrors + stats.rx_nombuf) > 0) {
				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
				printf("  RX-nombufs: %14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %14"PRIu64
			       " TX-dropped:%14"PRIu64
			       " TX-total:%14"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		if (ports_stats[pt_id].rx_stream)
			pkt_burst_stats_display("RX",
				&ports_stats[pt_id].rx_stream->rx_burst_stats);
		if (ports_stats[pt_id].tx_stream)
			pkt_burst_stats_display("TX",
				&ports_stats[pt_id].tx_stream->tx_burst_stats);
#endif

		if (port->rx_queue_stats_mapping_enabled) {
			printf("\n");
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d RX-packets:%14"PRIu64
				       "     RX-errors:%14"PRIu64
				       "    RX-bytes:%14"PRIu64"\n",
				       j, stats.q_ipackets[j],
				       stats.q_errors[j], stats.q_ibytes[j]);
			}
			printf("\n");
		}
		if (port->tx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d TX-packets:%14"PRIu64
				       " TX-bytes:%14"PRIu64"\n",
				       j, stats.q_opackets[j],
				       stats.q_obytes[j]);
			}
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
#define CYC_PER_MHZ 1E6
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%.2F (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64") at %"PRIu64
		       " MHz Clock\n",
		       (double) fwd_cycles / total_recv,
		       fwd_cycles, total_recv,
		       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
#endif
}

void
fwd_stats_reset(void)
{
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		fs->rx_packets = 0;
		fs->tx_packets = 0;
		fs->fwd_dropped = 0;
		fs->rx_bad_ip_csum = 0;
		fs->rx_bad_l4_csum = 0;
		fs->rx_bad_outer_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fs->core_cycles = 0;
#endif
	}
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the below do-while
				 * loop if rte_eth_rx_burst() always returns
				 * nonzero packets. So a timer is added to
				 * exit this loop after 1 second.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}

static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
		    bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
		    latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif
	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
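	/*
	 * The local copy has "stopped" pre-set, so the do/while loop in
	 * run_pkt_fwd_on_lcore() exits after a single pass, i.e. one burst
	 * per stream.
	 */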
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq are 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}

	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	fwd_stats_reset();
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	port_fwd_end_t port_fwd_end;
	lcoreid_t lc_id;
	portid_t pt_id;
	int i;

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}

	fwd_stats_display();

	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* All ports are started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

/* Configure the Rx and Tx hairpin queues for the selected port. */
static int
setup_hairpin_queues(portid_t pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];
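
	/*
	 * Hairpin TX queue qi is peered with RX queue i + nb_rxq of the same
	 * port (and each hairpin RX queue with TX queue i + nb_txq below),
	 * so traffic loops between the two queues inside the device.
	 */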
	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = pi;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up a TX hairpin queue, return */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = pi;
		hairpin_conf.peers[0].queue = i + nb_txq;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up an RX hairpin queue, return */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
2388 start_port(portid_t pid)
2390 int diag, need_check_link_status = -1;
2393 struct rte_port *port;
2394 struct rte_ether_addr mac_addr;
2395 struct rte_eth_hairpin_cap cap;
2397 if (port_id_is_invalid(pid, ENABLED_WARN))
2402 RTE_ETH_FOREACH_DEV(pi) {
2403 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2406 need_check_link_status = 0;
2408 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2409 RTE_PORT_HANDLING) == 0) {
2410 printf("Port %d is now not stopped\n", pi);
2414 if (port->need_reconfig > 0) {
2415 port->need_reconfig = 0;
2417 if (flow_isolate_all) {
2418 int ret = port_flow_isolate(pi, 1);
2420 printf("Failed to apply isolated"
2421 " mode on port %d\n", pi);
2425 configure_rxtx_dump_callbacks(0);
2426 printf("Configuring Port %d (socket %u)\n", pi,
2428 if (nb_hairpinq > 0 &&
2429 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2430 printf("Port %d doesn't support hairpin "
2434 /* configure port */
2435 diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
2436 nb_txq + nb_hairpinq,
2439 if (rte_atomic16_cmpset(&(port->port_status),
2440 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2441 printf("Port %d can not be set back "
2442 "to stopped\n", pi);
2443 printf("Fail to configure port %d\n", pi);
2444 /* try to reconfigure port next time */
2445 port->need_reconfig = 1;
2449 if (port->need_reconfig_queues > 0) {
2450 port->need_reconfig_queues = 0;
2451 /* setup tx queues */
2452 for (qi = 0; qi < nb_txq; qi++) {
2453 if ((numa_support) &&
2454 (txring_numa[pi] != NUMA_NO_CONFIG))
2455 diag = rte_eth_tx_queue_setup(pi, qi,
2456 port->nb_tx_desc[qi],
2458 &(port->tx_conf[qi]));
2460 diag = rte_eth_tx_queue_setup(pi, qi,
2461 port->nb_tx_desc[qi],
2463 &(port->tx_conf[qi]));
2468 /* Failed to set up a Tx queue; revert status and return */
2469 if (rte_atomic16_cmpset(&(port->port_status),
2471 RTE_PORT_STOPPED) == 0)
2472 printf("Port %d cannot be set back "
2473 "to stopped\n", pi);
2474 printf("Failed to configure port %d tx queues\n",
2476 /* try to reconfigure queues next time */
2477 port->need_reconfig_queues = 1;
2480 for (qi = 0; qi < nb_rxq; qi++) {
2481 /* setup rx queues */
2482 if ((numa_support) &&
2483 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2484 struct rte_mempool *mp =
2485 mbuf_pool_find(rxring_numa[pi]);
2487 printf("Failed to set up RX queue: "
2488 "no mempool allocated "
2489 "on socket %d\n",
2494 diag = rte_eth_rx_queue_setup(pi, qi,
2495 port->nb_rx_desc[qi],
2497 &(port->rx_conf[qi]),
2500 struct rte_mempool *mp =
2501 mbuf_pool_find(port->socket_id);
2503 printf("Failed to set up RX queue: "
2504 "no mempool allocated "
2505 "on socket %d\n",
2509 diag = rte_eth_rx_queue_setup(pi, qi,
2510 port->nb_rx_desc[qi],
2512 &(port->rx_conf[qi]),
2518 /* Failed to set up an Rx queue; revert status and return */
2519 if (rte_atomic16_cmpset(&(port->port_status),
2521 RTE_PORT_STOPPED) == 0)
2522 printf("Port %d cannot be set back "
2523 "to stopped\n", pi);
2524 printf("Failed to configure port %d rx queues\n",
2526 /* try to reconfigure queues next time */
2527 port->need_reconfig_queues = 1;
2530 /* setup hairpin queues */
2531 if (setup_hairpin_queues(pi) != 0)
2534 configure_rxtx_dump_callbacks(verbose_level);
2536 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2540 "Port %d: Failed to disable Ptype parsing\n",
2545 if (rte_eth_dev_start(pi) < 0) {
2546 printf("Failed to start port %d\n", pi);
2548 /* Failed to start the port; revert status and continue */
2549 if (rte_atomic16_cmpset(&(port->port_status),
2550 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2551 printf("Port %d cannot be set back to "
2556 if (rte_atomic16_cmpset(&(port->port_status),
2557 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2558 printf("Port %d cannot be set to started\n", pi);
2560 if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2561 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2562 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2563 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2564 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2566 /* at least one port started, need to check link status */
2567 need_check_link_status = 1;
2570 if (need_check_link_status == 1 && !no_link_check)
2571 check_all_ports_link_status(RTE_PORT_ALL);
2572 else if (need_check_link_status == 0)
2573 printf("Please stop the ports first\n");
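/*
 * Editorial note, illustrative only: port->port_status is driven through a
 * small state machine, and rte_atomic16_cmpset() guarantees that exactly one
 * context wins each transition:
 *
 *   start_port(): RTE_PORT_STOPPED -> RTE_PORT_HANDLING -> RTE_PORT_STARTED
 *   stop_port():  RTE_PORT_STARTED -> RTE_PORT_HANDLING -> RTE_PORT_STOPPED
 *
 * A hedged sketch of the claim step (the helper name is hypothetical):
 */
static int
example_claim_port_status(struct rte_port *port, uint16_t from, uint16_t to)
{
	/* non-zero only for the single caller that saw "from" in place */
	return rte_atomic16_cmpset(&(port->port_status), from, to);
}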
2580 stop_port(portid_t pid)
2583 struct rte_port *port;
2584 int need_check_link_status = 0;
2591 if (port_id_is_invalid(pid, ENABLED_WARN))
2594 printf("Stopping ports...\n");
2596 RTE_ETH_FOREACH_DEV(pi) {
2597 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2600 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2601 printf("Please remove port %d from forwarding configuration.\n", pi);
2605 if (port_is_bonding_slave(pi)) {
2606 printf("Please remove port %d from bonded device.\n", pi);
2611 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2612 RTE_PORT_HANDLING) == 0)
2615 rte_eth_dev_stop(pi);
2617 if (rte_atomic16_cmpset(&(port->port_status),
2618 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2619 printf("Port %d cannot be set to stopped\n", pi);
2620 need_check_link_status = 1;
2622 if (need_check_link_status && !no_link_check)
2623 check_all_ports_link_status(RTE_PORT_ALL);
2629 remove_invalid_ports_in(portid_t *array, portid_t *total)
2632 portid_t new_total = 0;
2634 for (i = 0; i < *total; i++)
2635 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2636 array[new_total] = array[i];
2643 remove_invalid_ports(void)
2645 remove_invalid_ports_in(ports_ids, &nb_ports);
2646 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2647 nb_cfg_ports = nb_fwd_ports;
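/*
 * Editorial sketch with hypothetical values: remove_invalid_ports_in()
 * compacts an id array in place, dropping detached ports and shrinking
 * the count through the out-parameter.
 */
static void
example_compact_ids(void)
{
	portid_t ids[] = { 0, 5, 7 };	/* suppose port 5 was just detached */
	portid_t total = RTE_DIM(ids);

	remove_invalid_ports_in(ids, &total);
	/* ids now starts { 0, 7 } and total == 2 */
}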
2651 close_port(portid_t pid)
2654 struct rte_port *port;
2656 if (port_id_is_invalid(pid, ENABLED_WARN))
2659 printf("Closing ports...\n");
2661 RTE_ETH_FOREACH_DEV(pi) {
2662 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2665 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2666 printf("Please remove port %d from forwarding configuration.\n", pi);
2670 if (port_is_bonding_slave(pi)) {
2671 printf("Please remove port %d from bonded device.\n", pi);
2676 if (rte_atomic16_cmpset(&(port->port_status),
2677 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2678 printf("Port %d is already closed\n", pi);
2682 if (rte_atomic16_cmpset(&(port->port_status),
2683 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2684 printf("Port %d is not stopped; skipping\n", pi);
2688 if (port->flow_list)
2689 port_flow_flush(pi);
2690 rte_eth_dev_close(pi);
2692 remove_invalid_ports();
2694 if (rte_atomic16_cmpset(&(port->port_status),
2695 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2696 printf("Port %d cannot be set to closed\n", pi);
2703 reset_port(portid_t pid)
2707 struct rte_port *port;
2709 if (port_id_is_invalid(pid, ENABLED_WARN))
2712 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2713 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2714 printf("Cannot reset port(s); please stop the port(s) first.\n");
2718 printf("Resetting ports...\n");
2720 RTE_ETH_FOREACH_DEV(pi) {
2721 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2724 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2725 printf("Please remove port %d from forwarding "
2726 "configuration.\n", pi);
2730 if (port_is_bonding_slave(pi)) {
2731 printf("Please remove port %d from bonded device.\n",
2736 diag = rte_eth_dev_reset(pi);
2739 port->need_reconfig = 1;
2740 port->need_reconfig_queues = 1;
2742 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2750 attach_port(char *identifier)
2753 struct rte_dev_iterator iterator;
2755 printf("Attaching a new port...\n");
2757 if (identifier == NULL) {
2758 printf("Invalid parameter specified\n");
2762 if (rte_dev_probe(identifier) < 0) {
2763 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2767 /* first attach mode: event */
2768 if (setup_on_probe_event) {
2769 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2770 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2771 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2772 ports[pi].need_setup != 0)
2773 setup_attached_port(pi);
2777 /* second attach mode: iterator */
2778 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2779 /* setup ports matching the devargs used for probing */
2780 if (port_is_forwarding(pi))
2781 continue; /* port was already attached before */
2782 setup_attached_port(pi);
2787 setup_attached_port(portid_t pi)
2789 unsigned int socket_id;
2792 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2793 /* if socket_id is invalid, set to the first available socket. */
2794 if (check_socket_id(socket_id) < 0)
2795 socket_id = socket_ids[0];
2796 reconfig(pi, socket_id);
2797 ret = rte_eth_promiscuous_enable(pi);
2799 printf("Error enabling promiscuous mode on port %u: %s - ignoring\n",
2800 pi, rte_strerror(-ret));
2802 ports_ids[nb_ports++] = pi;
2803 fwd_ports_ids[nb_fwd_ports++] = pi;
2804 nb_cfg_ports = nb_fwd_ports;
2805 ports[pi].need_setup = 0;
2806 ports[pi].port_status = RTE_PORT_STOPPED;
2808 printf("Port %d is attached. Total ports is now %d\n", pi, nb_ports);
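/*
 * Editorial note: attach_port() takes any EAL device identifier, either a
 * PCI address or a vdev devargs string. A hedged usage sketch; the
 * "net_null0" identifier is an illustrative assumption, not something the
 * code above requires:
 */
static void
example_attach_vdev(void)
{
	char devargs[] = "net_null0";	/* hypothetical virtual device */

	attach_port(devargs);		/* probes it, then sets up new ports */
}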
2813 detach_device(struct rte_device *dev)
2818 printf("Device already removed\n");
2822 printf("Removing a device...\n");
2824 if (rte_dev_remove(dev) < 0) {
2825 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2828 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2829 /* reset mapping between old ports and removed device */
2830 rte_eth_devices[sibling].device = NULL;
2831 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2832 /* sibling ports are forced to be closed */
2833 ports[sibling].port_status = RTE_PORT_CLOSED;
2834 printf("Port %u is closed\n", sibling);
2838 remove_invalid_ports();
2840 printf("Device is detached\n");
2841 printf("Total ports is now %d\n", nb_ports);
2847 detach_port_device(portid_t port_id)
2849 if (port_id_is_invalid(port_id, ENABLED_WARN))
2852 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2853 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2854 printf("Port not stopped\n");
2857 printf("Port was not closed\n");
2858 if (ports[port_id].flow_list)
2859 port_flow_flush(port_id);
2862 detach_device(rte_eth_devices[port_id].device);
2866 detach_devargs(char *identifier)
2868 struct rte_dev_iterator iterator;
2869 struct rte_devargs da;
2872 printf("Removing a device...\n");
2874 memset(&da, 0, sizeof(da));
2875 if (rte_devargs_parsef(&da, "%s", identifier)) {
2876 printf("Cannot parse identifier\n");
2882 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
2883 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2884 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2885 printf("Port %u not stopped\n", port_id);
2886 rte_eth_iterator_cleanup(&iterator);
2890 /* sibling ports are forced to be closed */
2891 if (ports[port_id].flow_list)
2892 port_flow_flush(port_id);
2893 ports[port_id].port_status = RTE_PORT_CLOSED;
2894 printf("Port %u is now closed\n", port_id);
2898 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
2899 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
2900 da.name, da.bus->name);
2904 remove_invalid_ports();
2906 printf("Device %s is detached\n", identifier);
2907 printf("Total ports is now %d\n", nb_ports);
2919 stop_packet_forwarding();
2921 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2923 if (mp_alloc_type == MP_ALLOC_ANON)
2924 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
2928 if (ports != NULL) {
2930 RTE_ETH_FOREACH_DEV(pt_id) {
2931 printf("\nStopping port %d...\n", pt_id);
2935 RTE_ETH_FOREACH_DEV(pt_id) {
2936 printf("\nShutting down port %d...\n", pt_id);
2943 ret = rte_dev_event_monitor_stop();
2946 "fail to stop device event monitor.");
2950 ret = rte_dev_event_callback_unregister(NULL,
2951 dev_event_callback, NULL);
2954 "fail to unregister device event callback.\n");
2958 ret = rte_dev_hotplug_handle_disable();
2961 "fail to disable hotplug handling.\n");
2965 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2967 rte_mempool_free(mempools[i]);
2970 printf("\nBye...\n");
2973 typedef void (*cmd_func_t)(void);
2974 struct pmd_test_command {
2975 const char *cmd_name;
2976 cmd_func_t cmd_func;
2979 /* Check the link status of all ports for up to 9s, then print the final status */
2981 check_all_ports_link_status(uint32_t port_mask)
2983 #define CHECK_INTERVAL 100 /* 100ms */
2984 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2986 uint8_t count, all_ports_up, print_flag = 0;
2987 struct rte_eth_link link;
2990 printf("Checking link statuses...\n");
2992 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2994 RTE_ETH_FOREACH_DEV(portid) {
2995 if ((port_mask & (1 << portid)) == 0)
2997 memset(&link, 0, sizeof(link));
2998 ret = rte_eth_link_get_nowait(portid, &link);
3001 if (print_flag == 1)
3002 printf("Port %u link get failed: %s\n",
3003 portid, rte_strerror(-ret));
3006 /* print link status if flag set */
3007 if (print_flag == 1) {
3008 if (link.link_status)
3010 "Port%d Link Up. speed %u Mbps- %s\n",
3011 portid, link.link_speed,
3012 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
3013 ("full-duplex") : ("half-duplex"));
3015 printf("Port %d Link Down\n", portid);
3018 /* clear all_ports_up flag if any link down */
3019 if (link.link_status == ETH_LINK_DOWN) {
3024 /* after the final print of all link statuses, get out */
3025 if (print_flag == 1)
3028 if (all_ports_up == 0) {
3030 rte_delay_ms(CHECK_INTERVAL);
3033 /* set the print_flag if all ports up or timeout */
3034 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3044 * This callback removes a single port of a device. It is limited in that
3045 * it cannot handle removing several ports of one device in one call.
3046 * TODO: the device-detach invocation is planned to move from the user side
3047 * into the EAL, with all PMDs converted to free port resources on ethdev close.
3050 rmv_port_callback(void *arg)
3052 int need_to_start = 0;
3053 int org_no_link_check = no_link_check;
3054 portid_t port_id = (intptr_t)arg;
3055 struct rte_device *dev;
3057 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3059 if (!test_done && port_is_forwarding(port_id)) {
3061 stop_packet_forwarding();
3065 no_link_check = org_no_link_check;
3067 /* Save rte_device pointer before closing ethdev port */
3068 dev = rte_eth_devices[port_id].device;
3069 close_port(port_id);
3070 detach_device(dev); /* might be already removed or have more ports */
3073 start_packet_forwarding(0);
3076 /* This function is used by the interrupt thread */
3078 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3081 RTE_SET_USED(param);
3082 RTE_SET_USED(ret_param);
3084 if (type >= RTE_ETH_EVENT_MAX) {
3085 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3086 port_id, __func__, type);
3088 } else if (event_print_mask & (UINT32_C(1) << type)) {
3089 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3090 eth_event_desc[type]);
3095 case RTE_ETH_EVENT_NEW:
3096 ports[port_id].need_setup = 1;
3097 ports[port_id].port_status = RTE_PORT_HANDLING;
3099 case RTE_ETH_EVENT_INTR_RMV:
3100 if (port_id_is_invalid(port_id, DISABLED_WARN))
3102 if (rte_eal_alarm_set(100000,
3103 rmv_port_callback, (void *)(intptr_t)port_id))
3104 fprintf(stderr, "Could not set up deferred device removal\n");
3113 register_eth_event_callback(void)
3116 enum rte_eth_event_type event;
3118 for (event = RTE_ETH_EVENT_UNKNOWN;
3119 event < RTE_ETH_EVENT_MAX; event++) {
3120 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3125 TESTPMD_LOG(ERR, "Failed to register callback for "
3126 "%s event\n", eth_event_desc[event]);
3134 /* This function is used by the interrupt thread */
3136 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3137 __rte_unused void *arg)
3142 if (type >= RTE_DEV_EVENT_MAX) {
3143 fprintf(stderr, "%s called upon invalid event %d\n",
3149 case RTE_DEV_EVENT_REMOVE:
3150 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3152 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3154 RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
3159 * Because the user's callback is invoked from within the EAL
3160 * interrupt callback, the interrupt callback must complete before
3161 * it can be unregistered while detaching the device. So this
3162 * callback returns quickly and detaches the device via a deferred
3163 * removal instead (see the sketch after this function). This is a
3164 * workaround; once device detaching moves into the EAL, the deferred removal could
3167 if (rte_eal_alarm_set(100000,
3168 rmv_port_callback, (void *)(intptr_t)port_id))
3170 "Could not set up deferred device removal\n");
3172 case RTE_DEV_EVENT_ADD:
3173 RTE_LOG(DEBUG, EAL, "The device: %s has been added!\n",
3175 /* TODO: once kernel driver binding is finished,
3176 * start attaching the port.
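/*
 * Editorial sketch, not part of testpmd: the deferred removal referred to
 * in the workaround comment above simply reschedules the detach onto the
 * EAL alarm thread, so this interrupt callback can return first. The helper
 * name is hypothetical; the 100 ms delay mirrors the value used above.
 */
static void
example_defer_removal(portid_t port_id)
{
	/* run rmv_port_callback(port_id) about 100000 us from now */
	if (rte_eal_alarm_set(100000, rmv_port_callback,
			(void *)(intptr_t)port_id) != 0)
		fprintf(stderr, "Could not set up deferred device removal\n");
}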
3185 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3189 uint8_t mapping_found = 0;
3191 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3192 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3193 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
3194 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
3195 tx_queue_stats_mappings[i].queue_id,
3196 tx_queue_stats_mappings[i].stats_counter_id);
3203 port->tx_queue_stats_mapping_enabled = 1;
3208 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3212 uint8_t mapping_found = 0;
3214 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3215 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3216 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
3217 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
3218 rx_queue_stats_mappings[i].queue_id,
3219 rx_queue_stats_mappings[i].stats_counter_id);
3226 port->rx_queue_stats_mapping_enabled = 1;
3231 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
3235 diag = set_tx_queue_stats_mapping_registers(pi, port);
3237 if (diag == -ENOTSUP) {
3238 port->tx_queue_stats_mapping_enabled = 0;
3239 printf("TX queue stats mapping not supported for port id=%d\n", pi);
3242 rte_exit(EXIT_FAILURE,
3243 "set_tx_queue_stats_mapping_registers "
3244 "failed for port id=%d diag=%d\n",
3248 diag = set_rx_queue_stats_mapping_registers(pi, port);
3250 if (diag == -ENOTSUP) {
3251 port->rx_queue_stats_mapping_enabled = 0;
3252 printf("RX queue stats mapping not supported for port id=%d\n", pi);
3255 rte_exit(EXIT_FAILURE,
3256 "set_rx_queue_stats_mapping_registers "
3257 "failed for port id=%d diag=%d\n",
3263 rxtx_port_config(struct rte_port *port)
3268 for (qid = 0; qid < nb_rxq; qid++) {
3269 offloads = port->rx_conf[qid].offloads;
3270 port->rx_conf[qid] = port->dev_info.default_rxconf;
3272 port->rx_conf[qid].offloads = offloads;
3274 /* Check if any Rx parameters have been passed */
3275 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3276 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3278 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3279 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3281 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3282 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3284 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3285 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3287 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3288 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3290 port->nb_rx_desc[qid] = nb_rxd;
3293 for (qid = 0; qid < nb_txq; qid++) {
3294 offloads = port->tx_conf[qid].offloads;
3295 port->tx_conf[qid] = port->dev_info.default_txconf;
3297 port->tx_conf[qid].offloads = offloads;
3299 /* Check if any Tx parameters have been passed */
3300 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3301 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3303 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3304 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3306 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3307 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3309 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3310 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3312 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3313 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3315 port->nb_tx_desc[qid] = nb_txd;
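/*
 * Editorial sketch: every RTE_PMD_PARAM_UNSET guard above keeps the PMD's
 * default_rxconf/default_txconf value unless the user overrode it on the
 * command line (e.g. --txfreet=32 sets tx_free_thresh). The helper below is
 * a hypothetical restatement of that pattern:
 */
static void
example_apply_override(struct rte_eth_txconf *txc, int16_t user_txfreet)
{
	if (user_txfreet != RTE_PMD_PARAM_UNSET)	/* user passed it */
		txc->tx_free_thresh = (uint16_t)user_txfreet;
}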
3320 init_port_config(void)
3323 struct rte_port *port;
3326 RTE_ETH_FOREACH_DEV(pid) {
3328 port->dev_conf.fdir_conf = fdir_conf;
3330 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3335 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3336 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3337 rss_hf & port->dev_info.flow_type_rss_offloads;
3339 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3340 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3343 if (port->dcb_flag == 0) {
3344 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3345 port->dev_conf.rxmode.mq_mode =
3346 (enum rte_eth_rx_mq_mode)
3347 (rx_mq_mode & ETH_MQ_RX_RSS);
3349 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3352 rxtx_port_config(port);
3354 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3358 map_port_queue_stats_mapping_registers(pid, port);
3359 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3360 rte_pmd_ixgbe_bypass_init(pid);
3363 if (lsc_interrupt &&
3364 (rte_eth_devices[pid].data->dev_flags &
3365 RTE_ETH_DEV_INTR_LSC))
3366 port->dev_conf.intr_conf.lsc = 1;
3367 if (rmv_interrupt &&
3368 (rte_eth_devices[pid].data->dev_flags &
3369 RTE_ETH_DEV_INTR_RMV))
3370 port->dev_conf.intr_conf.rmv = 1;
3374 void set_port_slave_flag(portid_t slave_pid)
3376 struct rte_port *port;
3378 port = &ports[slave_pid];
3379 port->slave_flag = 1;
3382 void clear_port_slave_flag(portid_t slave_pid)
3384 struct rte_port *port;
3386 port = &ports[slave_pid];
3387 port->slave_flag = 0;
3390 uint8_t port_is_bonding_slave(portid_t slave_pid)
3392 struct rte_port *port;
3394 port = &ports[slave_pid];
3395 if ((rte_eth_devices[slave_pid].data->dev_flags &
3396 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3401 const uint16_t vlan_tags[] = {
3402 0, 1, 2, 3, 4, 5, 6, 7,
3403 8, 9, 10, 11, 12, 13, 14, 15,
3404 16, 17, 18, 19, 20, 21, 22, 23,
3405 24, 25, 26, 27, 28, 29, 30, 31
3409 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3410 enum dcb_mode_enable dcb_mode,
3411 enum rte_eth_nb_tcs num_tcs,
3416 struct rte_eth_rss_conf rss_conf;
3419 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3420 * given above, and the number of traffic classes available for use.
3422 if (dcb_mode == DCB_VT_ENABLED) {
3423 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3424 ð_conf->rx_adv_conf.vmdq_dcb_conf;
3425 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3426 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3428 /* VMDQ+DCB RX and TX configurations */
3429 vmdq_rx_conf->enable_default_pool = 0;
3430 vmdq_rx_conf->default_pool = 0;
3431 vmdq_rx_conf->nb_queue_pools =
3432 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3433 vmdq_tx_conf->nb_queue_pools =
3434 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3436 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3437 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3438 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3439 vmdq_rx_conf->pool_map[i].pools =
3440 1 << (i % vmdq_rx_conf->nb_queue_pools);
3442 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3443 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3444 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3447 /* set DCB mode of RX and TX of multiple queues */
3448 eth_conf->rxmode.mq_mode =
3449 (enum rte_eth_rx_mq_mode)
3450 (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3451 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3453 struct rte_eth_dcb_rx_conf *rx_conf =
3454 ð_conf->rx_adv_conf.dcb_rx_conf;
3455 struct rte_eth_dcb_tx_conf *tx_conf =
3456 ð_conf->tx_adv_conf.dcb_tx_conf;
3458 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3460 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3464 rx_conf->nb_tcs = num_tcs;
3465 tx_conf->nb_tcs = num_tcs;
3467 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3468 rx_conf->dcb_tc[i] = i % num_tcs;
3469 tx_conf->dcb_tc[i] = i % num_tcs;
3472 eth_conf->rxmode.mq_mode =
3473 (enum rte_eth_rx_mq_mode)
3474 (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3475 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3476 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3480 eth_conf->dcb_capability_en =
3481 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3483 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
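/*
 * Editorial note, worked example with hypothetical numbers: for
 * num_tcs == ETH_4_TCS the VT branch above creates ETH_32_POOLS pools, so
 * each of the 32 vlan_tags lands in its own pool while the 8 user
 * priorities cycle over the 4 TCs:
 */
static void
example_print_dcb_vt_mapping(void)
{
	unsigned int i;

	for (i = 0; i < 8; i++)
		printf("vlan %u -> pool %u, prio %u -> tc %u\n",
			(unsigned int)vlan_tags[i], i % 32, i, i % 4);
}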
3489 init_port_dcb_config(portid_t pid,
3490 enum dcb_mode_enable dcb_mode,
3491 enum rte_eth_nb_tcs num_tcs,
3494 struct rte_eth_conf port_conf;
3495 struct rte_port *rte_port;
3499 rte_port = &ports[pid];
3501 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3502 /* Enter DCB configuration status */
3505 port_conf.rxmode = rte_port->dev_conf.rxmode;
3506 port_conf.txmode = rte_port->dev_conf.txmode;
3508 /* set configuration of DCB in VT mode and DCB in non-VT mode */
3509 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3512 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3514 /* re-configure the device */
3515 retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
3519 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3523 /* If dev_info.vmdq_pool_base is greater than 0,
3524 * the queue IDs of the VMDq pools start after the PF queues.
3526 if (dcb_mode == DCB_VT_ENABLED &&
3527 rte_port->dev_info.vmdq_pool_base > 0) {
3528 printf("VMDQ_DCB multi-queue mode is nonsensical"
3529 " for port %d.\n", pid);
3533 /* Assume the ports in testpmd have the same DCB capability
3534 * and the same number of rxq and txq in DCB mode
3536 if (dcb_mode == DCB_VT_ENABLED) {
3537 if (rte_port->dev_info.max_vfs > 0) {
3538 nb_rxq = rte_port->dev_info.nb_rx_queues;
3539 nb_txq = rte_port->dev_info.nb_tx_queues;
3541 nb_rxq = rte_port->dev_info.max_rx_queues;
3542 nb_txq = rte_port->dev_info.max_tx_queues;
3545 /* if VT is disabled, use all PF queues */
3546 if (rte_port->dev_info.vmdq_pool_base == 0) {
3547 nb_rxq = rte_port->dev_info.max_rx_queues;
3548 nb_txq = rte_port->dev_info.max_tx_queues;
3550 nb_rxq = (queueid_t)num_tcs;
3551 nb_txq = (queueid_t)num_tcs;
3555 rx_free_thresh = 64;
3557 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3559 rxtx_port_config(rte_port);
3561 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3562 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3563 rx_vft_set(pid, vlan_tags[i], 1);
3565 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3569 map_port_queue_stats_mapping_registers(pid, rte_port);
3571 rte_port->dcb_flag = 1;
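/*
 * Editorial sketch, hypothetical call: this is roughly what the
 * "port config 0 dcb vt off 4 pfc on" command drives, enabling plain DCB
 * with 4 traffic classes and priority flow control on port 0:
 */
static int
example_enable_dcb_port0(void)
{
	return init_port_dcb_config(0, DCB_ENABLED, ETH_4_TCS, 1 /* pfc_en */);
}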
3579 /* Configuration of Ethernet ports. */
3580 ports = rte_zmalloc("testpmd: ports",
3581 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3582 RTE_CACHE_LINE_SIZE);
3583 if (ports == NULL) {
3584 rte_exit(EXIT_FAILURE,
3585 "rte_zmalloc(%d struct rte_port) failed\n",
3589 /* Initialize ports NUMA structures */
3590 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3591 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3592 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3606 const char clr[] = { 27, '[', '2', 'J', '\0' };
3607 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3609 /* Clear screen and move to top left */
3610 printf("%s%s", clr, top_left);
3612 printf("\nPort statistics ====================================");
3613 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3614 nic_stats_display(fwd_ports_ids[i]);
3620 signal_handler(int signum)
3622 if (signum == SIGINT || signum == SIGTERM) {
3623 printf("\nSignal %d received, preparing to exit...\n",
3625 #ifdef RTE_LIBRTE_PDUMP
3626 /* uninitialize packet capture framework */
3629 #ifdef RTE_LIBRTE_LATENCY_STATS
3630 if (latencystats_enabled != 0)
3631 rte_latencystats_uninit();
3634 /* Set flag to indicate forced termination. */
3636 /* exit with the expected status */
3637 signal(signum, SIG_DFL);
3638 kill(getpid(), signum);
3643 main(int argc, char** argv)
3650 signal(SIGINT, signal_handler);
3651 signal(SIGTERM, signal_handler);
3653 testpmd_logtype = rte_log_register("testpmd");
3654 if (testpmd_logtype < 0)
3655 rte_exit(EXIT_FAILURE, "Cannot register log type");
3656 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3658 diag = rte_eal_init(argc, argv);
3660 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3661 rte_strerror(rte_errno));
3663 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3664 rte_exit(EXIT_FAILURE,
3665 "Secondary process type not supported.\n");
3667 ret = register_eth_event_callback();
3669 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3671 #ifdef RTE_LIBRTE_PDUMP
3672 /* initialize packet capture framework */
3677 RTE_ETH_FOREACH_DEV(port_id) {
3678 ports_ids[count] = port_id;
3681 nb_ports = (portid_t) count;
3683 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3685 /* allocate port structures, and init them */
3688 set_def_fwd_config();
3690 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3691 "Check the core mask argument\n");
3693 /* Bitrate/latency stats disabled by default */
3694 #ifdef RTE_LIBRTE_BITRATE
3695 bitrate_enabled = 0;
3697 #ifdef RTE_LIBRTE_LATENCY_STATS
3698 latencystats_enabled = 0;
3701 /* on FreeBSD, mlockall() is disabled by default */
3702 #ifdef RTE_EXEC_ENV_FREEBSD
3711 launch_args_parse(argc, argv);
3713 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3714 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3718 if (tx_first && interactive)
3719 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3720 "interactive mode.\n");
3722 if (tx_first && lsc_interrupt) {
3723 printf("Warning: lsc_interrupt needs to be off when "
3724 "using tx_first. Disabling.\n");
3728 if (!nb_rxq && !nb_txq)
3729 printf("Warning: Either rx or tx queues should be non-zero\n");
3731 if (nb_rxq > 1 && nb_rxq > nb_txq)
3732 printf("Warning: nb_rxq=%d enables RSS configuration, "
3733 "but nb_txq=%d will prevent it from being fully tested.\n",
3739 ret = rte_dev_hotplug_handle_enable();
3742 "fail to enable hotplug handling.");
3746 ret = rte_dev_event_monitor_start();
3749 "fail to start device event monitoring.");
3753 ret = rte_dev_event_callback_register(NULL,
3754 dev_event_callback, NULL);
3757 "fail to register device event callback\n");
3762 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3763 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3765 /* set all ports to promiscuous mode by default */
3766 RTE_ETH_FOREACH_DEV(port_id) {
3767 ret = rte_eth_promiscuous_enable(port_id);
3769 printf("Error enabling promiscuous mode on port %u: %s - ignoring\n",
3770 port_id, rte_strerror(-ret));
3773 /* Init metrics library */
3774 rte_metrics_init(rte_socket_id());
3776 #ifdef RTE_LIBRTE_LATENCY_STATS
3777 if (latencystats_enabled != 0) {
3778 int ret = rte_latencystats_init(1, NULL);
3780 printf("Warning: latencystats init()"
3781 " returned error %d\n", ret);
3782 printf("Latencystats running on lcore %d\n",
3783 latencystats_lcore_id);
3787 /* Setup bitrate stats */
3788 #ifdef RTE_LIBRTE_BITRATE
3789 if (bitrate_enabled != 0) {
3790 bitrate_data = rte_stats_bitrate_create();
3791 if (bitrate_data == NULL)
3792 rte_exit(EXIT_FAILURE,
3793 "Could not allocate bitrate data.\n");
3794 rte_stats_bitrate_reg(bitrate_data);
3798 #ifdef RTE_LIBRTE_CMDLINE
3799 if (strlen(cmdline_filename) != 0)
3800 cmdline_read_from_file(cmdline_filename);
3802 if (interactive == 1) {
3804 printf("Starting automatic packet forwarding\n");
3805 start_packet_forwarding(0);
3817 printf("No command-line core given; starting packet forwarding\n");
3818 start_packet_forwarding(tx_first);
3819 if (stats_period != 0) {
3820 uint64_t prev_time = 0, cur_time, diff_time = 0;
3821 uint64_t timer_period;
3823 /* Convert to number of cycles */
3824 timer_period = stats_period * rte_get_timer_hz();
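/*
 * Editorial note, worked example with a hypothetical clock: with
 * stats_period = 5 and rte_get_timer_hz() == 2500000000 (2.5 GHz),
 * timer_period becomes 12500000000 cycles, so the loop below refreshes
 * the statistics display roughly every 5 seconds of wall time.
 */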
3826 while (f_quit == 0) {
3827 cur_time = rte_get_timer_cycles();
3828 diff_time += cur_time - prev_time;
3830 if (diff_time >= timer_period) {
3832 /* Reset the timer */
3835 /* Sleep to avoid unnecessary checks */
3836 prev_time = cur_time;
3841 printf("Press enter to exit\n");
3842 rc = read(0, &c, 1);
3848 ret = rte_eal_cleanup();
3850 rte_exit(EXIT_FAILURE,
3851 "EAL cleanup failed: %s\n", strerror(-ret));
3853 return EXIT_SUCCESS;