/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif
#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;
/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
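
/*
 * Usage sketch (hedged: assumes the usual testpmd --mp-alloc command-line
 * option is available in this build):
 *
 *	testpmd -l 0-3 -n 4 -- --mp-alloc=anon -i
 *
 * would select MP_ALLOC_ANON, i.e. an empty mempool populated from
 * anonymous memory instead of hugepage-backed DPDK memory.
 */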
/*
 * Store the specified sockets on which the memory pools used by the
 * ports are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option
 * cannot be terminated from outside. Set a flag to exit the stats-period
 * loop when SIGINT/SIGTERM is received.
 */
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
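
/*
 * Example (assuming the standard "set txpkts" runtime command): entering
 * "set txpkts 64,128" would make tx_pkt_seg_lengths[] = {64, 128},
 * tx_pkt_nb_segs = 2 and tx_pkt_length = 192, so every TXONLY packet is
 * built as a two-segment chain of 64 + 128 bytes.
 */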
enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */
/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour.
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;
/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
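
/*
 * Sketch of how the mask is consumed (illustrative): an event of type
 * 'type' is only printed when its bit is set, i.e. a check of the form
 *
 *	if (event_print_mask & (UINT32_C(1) << type))
 *		printf("event: %s\n", eth_event_desc[type]);
 *
 * RTE_ETH_EVENT_VF_MBOX is the only bit left clear above, so VF mailbox
 * events are masked by default.
 */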
/*
 * Decide if all memory is locked for performance.
 */

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		/**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/*
 * Hexadecimal bitmask of RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);

/*
 * Check if all the ports are started.
 * If yes, return a positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
/*
 * Helper function to check if a socket is newly discovered.
 * If yes, return a positive value. If not, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
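
/*
 * Worked example for calc_mem_size() (illustrative numbers): suppose
 * nb_mbufs = 180224, pgsz = 2MB and rte_mempool_calc_obj_size() reports
 * obj_sz = 2304 bytes. Then mbuf_per_pg = 2097152 / 2304 = 910,
 * n_pages = 180224 / 910 + 1 = 199, mbuf_mem = 199 * 2MB = 398MB, and
 * total_mem = 398MB plus the assumed 128MB of header overhead, rounded
 * up to a page boundary. The estimate is deliberately pessimistic.
 */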
static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
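
/*
 * Example: for 2MB pages rte_log2_u64(2097152) = 21, so this returns
 * 21 << HUGE_SHIFT (21 << 26 on Linux), which is exactly the kernel's
 * MAP_HUGE_2MB encoding for mmap().
 */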
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}
struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
					memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
				      memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
}
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
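
	/*
	 * Worked example (illustrative numbers): with mbuf_sz = 2176 and
	 * 64-byte cache lines, elt_size stays 2176, so one 2MB zone holds
	 * elt_num = 2097152 / 2176 = 963 buffers; nb_mbufs = 10000 then
	 * rounds up to zone_num = 11 memzones.
	 */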
	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
/*
 * Check whether the given socket ID is valid in NUMA mode.
 * If valid, return 0; otherwise return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port ID that has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return max_rxq_valid ? allowed_max_rxq : 0;
}

/*
 * Check whether the input rxq is valid or not.
 * The input rxq is valid if it is not greater than the maximum number
 * of RX queues of every port.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq, allowed_max_rxq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port ID that has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return max_txq_valid ? allowed_max_txq : 0;
}

/*
 * Check whether the input txq is valid or not.
 * The input txq is valid if it is not greater than the maximum number
 * of TX queues of every port.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq, allowed_max_txq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RXDs of every RX queue.
 * *pid returns the port ID that has the minimal value of
 * max_rxd among all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_rxd(portid_t *pid)
{
	uint16_t allowed_max_rxd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_rxd;
}

/*
 * Get the allowed minimal number of RXDs of every RX queue.
 * *pid returns the port ID that has the highest value of
 * min_rxd among all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_rxd(portid_t *pid)
{
	uint16_t allowed_min_rxd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_rxd;
}

/*
 * Check whether the input rxd is valid or not.
 * The input rxd is valid if it is not greater than the maximum number
 * of RXDs of every Rx queue and not less than the minimal number of
 * RXDs of every Rx queue.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_rxd(queueid_t rxd)
{
	uint16_t allowed_max_rxd;
	uint16_t allowed_min_rxd;
	portid_t pid = 0;

	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
	if (rxd > allowed_max_rxd) {
		printf("Fail: input rxd (%u) can't be greater "
		       "than max_rxds (%u) of port %u\n",
		       rxd, allowed_max_rxd, pid);
		return -1;
	}

	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
	if (rxd < allowed_min_rxd) {
		printf("Fail: input rxd (%u) can't be less "
		       "than min_rxds (%u) of port %u\n",
		       rxd, allowed_min_rxd, pid);
		return -1;
	}

	return 0;
}
/*
 * Get the allowed maximum number of TXDs of every TX queue.
 * *pid returns the port ID that has the minimal value of
 * max_txd among all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_txd(portid_t *pid)
{
	uint16_t allowed_max_txd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_txd;
}

/*
 * Get the allowed minimal number of TXDs of every TX queue.
 * *pid returns the port ID that has the highest value of
 * min_txd among all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_txd(portid_t *pid)
{
	uint16_t allowed_min_txd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_txd;
}

/*
 * Check whether the input txd is valid or not.
 * The input txd is valid if it is not greater than the maximum number
 * of TXDs of every Tx queue and not less than the minimal number of
 * TXDs of every Tx queue.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_txd(queueid_t txd)
{
	uint16_t allowed_max_txd;
	uint16_t allowed_min_txd;
	portid_t pid = 0;

	allowed_max_txd = get_allowed_max_nb_txd(&pid);
	if (txd > allowed_max_txd) {
		printf("Fail: input txd (%u) can't be greater "
		       "than max_txds (%u) of port %u\n",
		       txd, allowed_max_txd, pid);
		return -1;
	}

	allowed_min_txd = get_allowed_min_nb_txd(&pid);
	if (txd < allowed_min_txd) {
		printf("Fail: input txd (%u) can't be less "
		       "than min_txds (%u) of port %u\n",
		       txd, allowed_min_txd, pid);
		return -1;
	}

	return 0;
}
/*
 * Get the allowed maximum number of hairpin queues.
 * *pid returns the port ID that has the minimal value of
 * max_hairpin_queues among all ports.
 */
queueid_t
get_allowed_max_nb_hairpinq(portid_t *pid)
{
	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
	portid_t pi;
	struct rte_eth_hairpin_cap cap;

	RTE_ETH_FOREACH_DEV(pi) {
		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
			*pid = pi;
			return 0;
		}
		if (cap.max_nb_queues < allowed_max_hairpinq) {
			allowed_max_hairpinq = cap.max_nb_queues;
			*pid = pi;
		}
	}
	return allowed_max_hairpinq;
}

/*
 * Check whether the input hairpin queue number is valid or not.
 * The input is valid if it is not greater than the maximum number of
 * hairpin queues of every port.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_hairpinq(queueid_t hairpinq)
{
	queueid_t allowed_max_hairpinq;
	portid_t pid = 0;

	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
	if (hairpinq > allowed_max_hairpinq) {
		printf("Fail: input hairpin (%u) can't be greater "
		       "than max_hairpin_queues (%u) of port %u\n",
		       hairpinq, allowed_max_hairpinq, pid);
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	uint16_t data_size;
	bool warning = 0;
	int k;
	int ret;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_info_get() failed\n");

		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id =
					rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;

		/* Check for maximum number of segments per MTU.
		 * Accordingly update the mbuf data size.
		 */
		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
		    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = rx_mode.max_rx_pkt_len /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;

			if ((data_size + RTE_PKTMBUF_HEADROOM) >
							mbuf_data_size) {
				mbuf_data_size = data_size +
						 RTE_PKTMBUF_HEADROOM;
				warning = 1;
			}
		}
	}

	if (warning)
		TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
			    mbuf_data_size);

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
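
	/*
	 * Worked example (illustrative: the actual macro values depend on
	 * the build): with RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX =
	 * 2048, 8 lcores, a 250-entry mempool cache and MAX_PKT_BURST =
	 * 512, this gives 2048 + 8 * 250 + 2048 + 512 = 6608 mbufs,
	 * multiplied by RTE_MAX_ETHPORTS.
	 */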
	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mempools[i] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool,
						       socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mempools[0] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool, 0);
		else
			mempools[socket_num] = mbuf_pool_create
						(mbuf_data_size,
						 nb_mbuf_per_pool,
						 socket_num);
	}

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;
	int ret;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];

	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
	if (ret != 0)
		return;

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	uint64_t total_burst, sburst;
	uint64_t nb_burst;
	uint64_t burst_stats[4];
	uint16_t pktnb_stats[4];
	uint16_t nb_pkt;
	int burst_percent[4], sburstp;
	int i;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];
	pktnb_stats[0] = 0;

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];

		if (nb_burst == 0)
			continue;

		total_burst += nb_burst;

		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		if (i == 3) {
			printf("%d%% of other]\n", 100 - sburstp);
			return;
		}

		sburst += burst_stats[i];
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
				100 - sburstp, (int) pktnb_stats[i]);
			return;
		}

		burst_percent[i] =
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
			burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
	}
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
		"TX Port=%2d/Queue=%2d %s\n",
		fwd_top_stats_border, fs->rx_port, fs->rx_queue,
		fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
		" TX-dropped: %-14"PRIu64,
		fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
			"  Rx- bad L4 checksum: %-14"PRIu64
			"  Rx- bad outer L4 checksum: %-14"PRIu64"\n",
			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
			fs->rx_bad_outer_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles = 0;
#endif
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles += fs->core_cycles;
#endif
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		uint8_t j;

		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		if (!port->rx_queue_stats_mapping_enabled &&
		    !port->tx_queue_stats_mapping_enabled) {
			printf("  RX-packets: %-14"PRIu64
			       " RX-dropped: %-14"PRIu64
			       "RX-total: %-"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum: %-14"PRIu64
				       " Bad-l4csum: %-14"PRIu64
				       "Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if (stats.ierrors + stats.rx_nombuf > 0) {
				printf("  RX-error: %-"PRIu64"\n",
				       stats.ierrors);
				printf("  RX-nombufs: %-14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %-14"PRIu64
			       " TX-dropped: %-14"PRIu64
			       "TX-total: %-"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		} else {
			printf("  RX-packets: %14"PRIu64
			       " RX-dropped:%14"PRIu64
			       " RX-total:%14"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum:%14"PRIu64
				       " Bad-l4csum:%14"PRIu64
				       " Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if ((stats.ierrors + stats.rx_nombuf) > 0) {
				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
				printf("  RX-nombufs: %14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %14"PRIu64
			       " TX-dropped:%14"PRIu64
			       " TX-total:%14"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		if (ports_stats[pt_id].rx_stream)
			pkt_burst_stats_display("RX",
				&ports_stats[pt_id].rx_stream->rx_burst_stats);
		if (ports_stats[pt_id].tx_stream)
			pkt_burst_stats_display("TX",
				&ports_stats[pt_id].tx_stream->tx_burst_stats);
#endif

		if (port->rx_queue_stats_mapping_enabled) {
			printf("\n");
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d RX-packets:%14"PRIu64
				       "     RX-errors:%14"PRIu64
				       "    RX-bytes:%14"PRIu64"\n",
				       j, stats.q_ipackets[j],
				       stats.q_errors[j], stats.q_ibytes[j]);
			}
			printf("\n");
		}
		if (port->tx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d TX-packets:%14"PRIu64
				       "    TX-bytes:%14"PRIu64"\n",
				       j, stats.q_opackets[j],
				       stats.q_obytes[j]);
			}
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
#define CYC_PER_MHZ 1E6
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%.2F (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64") at %"PRIu64
		       " MHz Clock\n",
		       (double) fwd_cycles / total_recv,
		       fwd_cycles, total_recv,
		       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
#endif
}
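
/*
 * Example reading of the cycles report above (illustrative): on a
 * 2.3 GHz TSC, rte_get_tsc_hz() / CYC_PER_MHZ = 2300, so
 * "CPU cycles/packet=57.50 (total cycles=115000 / total RX packets=2000)
 * at 2300 MHz Clock" means each forwarded packet cost about 57.5 TSC
 * cycles, i.e. roughly 25 ns per packet.
 */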
void
fwd_stats_reset(void)
{
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		fs->rx_packets = 0;
		fs->tx_packets = 0;
		fs->fwd_dropped = 0;
		fs->rx_bad_ip_csum = 0;
		fs->rx_bad_l4_csum = 0;
		fs->rx_bad_outer_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fs->core_cycles = 0;
#endif
	}
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer is added to
				 * exit the loop after a 1-second expiry.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}

	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	fwd_stats_reset();
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	port_fwd_end_t port_fwd_end;
	lcoreid_t lc_id;
	portid_t pt_id;
	int i;

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}

	fwd_stats_display();

	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* All ports are started */
	return 1;
}

static int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}
/* Configure the Rx and Tx hairpin queues for the selected port. */
static int
setup_hairpin_queues(portid_t pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];

	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = pi;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Fail to setup tx hairpin queue, return */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = pi;
		hairpin_conf.peers[0].queue = i + nb_txq;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Fail to setup rx hairpin queue, return */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
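
/*
 * Wiring example (illustrative): with nb_rxq = nb_txq = 2 and
 * nb_hairpinq = 2, the loops above bind TX hairpin queues 2..3 to peer
 * RX queues 2..3 of the same port, and RX hairpin queues 2..3 to peer
 * TX queues 2..3, so hairpin traffic loops entirely inside the NIC.
 */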
2404 start_port(portid_t pid)
2406 int diag, need_check_link_status = -1;
2409 struct rte_port *port;
2410 struct rte_ether_addr mac_addr;
2411 struct rte_eth_hairpin_cap cap;
2413 if (port_id_is_invalid(pid, ENABLED_WARN))
2418 RTE_ETH_FOREACH_DEV(pi) {
2419 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2422 need_check_link_status = 0;
2424 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2425 RTE_PORT_HANDLING) == 0) {
2426 printf("Port %d is now not stopped\n", pi);
2430 if (port->need_reconfig > 0) {
2431 port->need_reconfig = 0;
2433 if (flow_isolate_all) {
2434 int ret = port_flow_isolate(pi, 1);
2436 printf("Failed to apply isolated"
2437 " mode on port %d\n", pi);
2441 configure_rxtx_dump_callbacks(0);
2442 printf("Configuring Port %d (socket %u)\n", pi,
2444 if (nb_hairpinq > 0 &&
2445 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2446 printf("Port %d doesn't support hairpin "
2450 /* configure port */
2451 diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
2452 nb_txq + nb_hairpinq,
2455 if (rte_atomic16_cmpset(&(port->port_status),
2456 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2457 printf("Port %d can not be set back "
2458 "to stopped\n", pi);
2459 printf("Fail to configure port %d\n", pi);
2460 /* try to reconfigure port next time */
2461 port->need_reconfig = 1;
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* Set up Tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));
				/* Failed to set up Tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back to stopped\n", pi);
				printf("Failed to configure port %d tx queues\n", pi);
				/* Try to reconfigure the queues next time */
				port->need_reconfig_queues = 1;
			for (qi = 0; qi < nb_rxq; qi++) {
				/* Set up Rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL)
						printf("Failed to set up RX queue: no mempool allocated on socket %d\n",
							rxring_numa[pi]);
					diag = rte_eth_rx_queue_setup(pi, qi,
						port->nb_rx_desc[qi],
						rxring_numa[pi],
						&(port->rx_conf[qi]),
						mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL)
						printf("Failed to set up RX queue: no mempool allocated on socket %d\n",
							port->socket_id);
					diag = rte_eth_rx_queue_setup(pi, qi,
						port->nb_rx_desc[qi],
						port->socket_id,
						&(port->rx_conf[qi]),
						mp);
				/* Failed to set up Rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back to stopped\n", pi);
				printf("Failed to configure port %d rx queues\n", pi);
				/* Try to reconfigure the queues next time */
				port->need_reconfig_queues = 1;
			/* Set up hairpin queues */
			if (setup_hairpin_queues(pi) != 0)
		configure_rxtx_dump_callbacks(verbose_level);
		diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
				"Port %d: Failed to disable Ptype parsing\n", pi);
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);
			/* Failed to start the port, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to stopped\n", pi);
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set to started\n", pi);
		if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
			printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
		/* At least one port was started, so the link status needs checking */
		need_check_link_status = 1;
	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");
stop_port(portid_t pid)
	struct rte_port *port;
	int need_check_link_status = 0;
	if (port_id_is_invalid(pid, ENABLED_WARN))
	printf("Stopping ports...\n");
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
		rte_eth_dev_stop(pi);
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set to stopped\n", pi);
		need_check_link_status = 1;
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
remove_invalid_ports_in(portid_t *array, portid_t *total)
	portid_t new_total = 0;
	for (i = 0; i < *total; i++)
		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
			array[new_total] = array[i];
			new_total++;
		}
	*total = new_total;
remove_invalid_ports(void)
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
close_port(portid_t pid)
	struct rte_port *port;
	if (port_id_is_invalid(pid, ENABLED_WARN))
	printf("Closing ports...\n");
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);
	remove_invalid_ports();
	if (rte_atomic16_cmpset(&(port->port_status),
		RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
		printf("Port %d cannot be set to closed\n", pi);
reset_port(portid_t pid)
	struct rte_port *port;
	if (port_id_is_invalid(pid, ENABLED_WARN))
	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
		printf("Cannot reset port(s); please stop the port(s) first.\n");
	printf("Resetting ports...\n");
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
		diag = rte_eth_dev_reset(pi);
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
attach_port(char *identifier)
	struct rte_dev_iterator iterator;
	printf("Attaching a new port...\n");
	if (identifier == NULL) {
		printf("Invalid parameter: no identifier specified\n");
	if (rte_dev_probe(identifier) < 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
	/* First attach mode: event */
	if (setup_on_probe_event) {
		/* New ports are detected on the RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
					ports[pi].need_setup != 0)
				setup_attached_port(pi);
	/* Second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* Set up the ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
setup_attached_port(portid_t pi)
	unsigned int socket_id;
	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* If socket_id is invalid, use the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	ret = rte_eth_promiscuous_enable(pi);
		printf("Error enabling promiscuous mode for port %u: %s - ignoring\n",
			pi, rte_strerror(-ret));
	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;
	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
detach_device(struct rte_device *dev)
		printf("Device already removed\n");
	printf("Removing a device...\n");
	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		/* Reset the mapping between the old ports and the removed device */
		rte_eth_devices[sibling].device = NULL;
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			/* Sibling ports are forced closed */
			ports[sibling].port_status = RTE_PORT_CLOSED;
			printf("Port %u is closed\n", sibling);
	remove_invalid_ports();
	printf("Device is detached\n");
	printf("Total number of ports is now %d\n", nb_ports);
detach_port_device(portid_t port_id)
	if (port_id_is_invalid(port_id, ENABLED_WARN))
	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			printf("Port not stopped\n");
		printf("Port was not closed\n");
		if (ports[port_id].flow_list)
			port_flow_flush(port_id);
	detach_device(rte_eth_devices[port_id].device);
detach_devargs(char *identifier)
	struct rte_dev_iterator iterator;
	struct rte_devargs da;
	printf("Removing a device...\n");
	memset(&da, 0, sizeof(da));
	if (rte_devargs_parsef(&da, "%s", identifier)) {
		printf("Cannot parse identifier\n");
	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
				printf("Port %u not stopped\n", port_id);
				rte_eth_iterator_cleanup(&iterator);
			/* Matching ports are forced closed */
			if (ports[port_id].flow_list)
				port_flow_flush(port_id);
			ports[port_id].port_status = RTE_PORT_CLOSED;
			printf("Port %u is now closed\n", port_id);
	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
			da.name, da.bus->name);
	remove_invalid_ports();
	printf("Device %s is detached\n", identifier);
	printf("Total number of ports is now %d\n", nb_ports);
	stop_packet_forwarding();
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
		if (mp_alloc_type == MP_ALLOC_ANON)
			rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
					     NULL);
	if (ports != NULL) {
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
	ret = rte_dev_event_monitor_stop();
			"failed to stop the device event monitor.");
	ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
			"failed to unregister the device event callback.\n");
	ret = rte_dev_hotplug_handle_disable();
			"failed to disable hotplug handling.\n");
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
		rte_mempool_free(mempools[i]);
	printf("\nBye...\n");
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
/* Check the link status of all ports for up to 9 s, then print a final summary */
check_all_ports_link_status(uint32_t port_mask)
#define CHECK_INTERVAL 100 /* 100 ms */
#define MAX_CHECK_TIME 90 /* 9 s (90 * 100 ms) in total */
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	printf("Checking link statuses...\n");
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
			/* Print the link status if the flag is set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port %d Link Up. Speed %u Mbps - %s\n",
					portid, link.link_speed,
					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
			/* Clear the all_ports_up flag if any link is down */
			if (link.link_status == ETH_LINK_DOWN) {
		/* After the final status printout, get out of the loop */
		if (print_flag == 1)
		if (all_ports_up == 0) {
			rte_delay_ms(CHECK_INTERVAL);
		/* Set print_flag once all ports are up, or on timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
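/*
 * Worked timing note (illustrative): the loop above waits at most
 * MAX_CHECK_TIME * CHECK_INTERVAL = 90 * 100 ms = 9 s. Once every polled
 * port reports link up, or the timeout is one interval away, print_flag is
 * set, the next pass prints the per-port status, and the loop exits.
 */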
 * This callback removes a single port of a device. It is limited in that
 * it cannot handle removal of several ports of one device at once.
 * TODO: the device-detach invocation is planned to move from the user side
 * into the EAL, and all PMDs will then free their port resources when the
 * ethernet device is closed.
rmv_port_callback(void *arg)
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;
	struct rte_device *dev;
	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	if (!test_done && port_is_forwarding(port_id)) {
		stop_packet_forwarding();
	no_link_check = org_no_link_check;
	/* Save the rte_device pointer before closing the ethdev port */
	dev = rte_eth_devices[port_id].device;
	close_port(port_id);
	detach_device(dev); /* might already be removed or have more ports */
	start_packet_forwarding(0);
/* This function is used by the interrupt thread */
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);
	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
	case RTE_ETH_EVENT_NEW:
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
register_eth_event_callback(void)
	enum rte_eth_event_type event;
	for (event = RTE_ETH_EVENT_UNKNOWN;
			event < RTE_ETH_EVENT_MAX; event++) {
		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
			TESTPMD_LOG(ERR, "Failed to register callback for %s event\n",
				eth_event_desc[event]);
/* This function is used by the interrupt thread */
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
	__rte_unused void *arg)
	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
			RTE_LOG(ERR, EAL, "Cannot get port for device %s!\n",
				device_name);
		/*
		 * Because the user's callback is invoked from within the EAL
		 * interrupt callback, the interrupt callback must be allowed
		 * to finish before it can be unregistered while detaching the
		 * device. So return from this callback quickly and detach the
		 * device via a deferred removal instead. This is a
		 * workaround: once device detaching moves into the EAL, the
		 * deferred removal will no longer be needed.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr,
				"Could not set up deferred device removal\n");
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
			device_name);
		/* TODO: after kernel driver binding is finished,
		 * begin to attach the port.
set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
	uint8_t mapping_found = 0;
	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
	port->tx_queue_stats_mapping_enabled = 1;
set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
	uint8_t mapping_found = 0;
	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
	port->rx_queue_stats_mapping_enabled = 1;
map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag == -ENOTSUP) {
		port->tx_queue_stats_mapping_enabled = 0;
		printf("TX queue stats mapping not supported on port id=%d\n", pi);
		rte_exit(EXIT_FAILURE,
			"set_tx_queue_stats_mapping_registers failed for port id=%d diag=%d\n",
			pi, diag);
	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag == -ENOTSUP) {
		port->rx_queue_stats_mapping_enabled = 0;
		printf("RX queue stats mapping not supported on port id=%d\n", pi);
		rte_exit(EXIT_FAILURE,
			"set_rx_queue_stats_mapping_registers failed for port id=%d diag=%d\n",
			pi, diag);
rxtx_port_config(struct rte_port *port)
	for (qid = 0; qid < nb_rxq; qid++) {
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;
		port->rx_conf[qid].offloads = offloads;
		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;
		port->nb_rx_desc[qid] = nb_rxd;
	for (qid = 0; qid < nb_txq; qid++) {
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		port->tx_conf[qid].offloads = offloads;
		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
		port->nb_tx_desc[qid] = nb_txd;
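/*
 * Usage note (illustrative; the exact option spellings come from testpmd's
 * documented parameter list and should be treated as an assumption here):
 * the RTE_PMD_PARAM_UNSET checks above mean a threshold is only overridden
 * when the corresponding command-line option was given, e.g.
 *
 *   testpmd -l 0-1 -n 4 -- --rxfreet=32 --txfreet=32
 *
 * would override only rx_free_thresh and tx_free_thresh, leaving all other
 * thresholds at the PMD defaults taken from dev_info.default_rxconf and
 * dev_info.default_txconf.
 */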
init_port_config(void)
	struct rte_port *port;
	RTE_ETH_FOREACH_DEV(pid) {
		port->dev_conf.fdir_conf = fdir_conf;
		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_RSS);
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		rxtx_port_config(port);
		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
void set_port_slave_flag(portid_t slave_pid)
	struct rte_port *port;
	port = &ports[slave_pid];
	port->slave_flag = 1;
void clear_port_slave_flag(portid_t slave_pid)
	struct rte_port *port;
	port = &ports[slave_pid];
	port->slave_flag = 0;
uint8_t port_is_bonding_slave(portid_t slave_pid)
	struct rte_port *port;
	port = &ports[slave_pid];
	if ((rte_eth_devices[slave_pid].data->dev_flags &
	     RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
const uint16_t vlan_tags[] = {
	0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
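/*
 * Worked example (illustrative, not from the original sources): with
 * num_tcs == ETH_4_TCS, get_eth_dcb_conf() below selects ETH_32_POOLS, so
 * each of the 32 VLAN tags above maps to its own pool:
 * pool_map[i].pools == 1 << (i % 32), i.e. VLAN 5 lands in pool 5. The
 * eight user priorities are then spread round-robin over the traffic
 * classes: dcb_tc[] == { 0, 1, 2, 3, 0, 1, 2, 3 }.
 */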
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		enum dcb_mode_enable dcb_mode,
		enum rte_eth_nb_tcs num_tcs,
		uint8_t pfc_en)
	struct rte_eth_rss_conf rss_conf;
	/*
	 * Build up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		/* Set the DCB mode of RX and TX for the multi-queue setup */
		eth_conf->rxmode.mq_mode =
			(enum rte_eth_rx_mq_mode)
			(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;
		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		eth_conf->rxmode.mq_mode =
			(enum rte_eth_rx_mq_mode)
			(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
init_port_dcb_config(portid_t pid,
		enum dcb_mode_enable dcb_mode,
		enum rte_eth_nb_tcs num_tcs,
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	rte_port = &ports[pid];
	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	port_conf.rxmode = rte_port->dev_conf.rxmode;
	port_conf.txmode = rte_port->dev_conf.txmode;
	/* Set the configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	/* Reconfigure the device. */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue ids of the vmdq pools start after the pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical for port %d.", pid);
	/* Assume all ports in testpmd have the same dcb capability
	 * and the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
	} else {
		/* If VT is disabled, use all PF queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
	rx_free_thresh = 64;
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
	rxtx_port_config(rte_port);
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);
	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);
	rte_port->dcb_flag = 1;
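/*
 * Usage sketch (illustrative, not from the original sources): enabling DCB
 * with four traffic classes and priority flow control on port 0, much as
 * the "port config 0 dcb ..." command layer would. The port must be
 * stopped first; start_port() then applies the stashed configuration.
 */
#if 0	/* example only -- not compiled */
	if (all_ports_stopped() &&
	    init_port_dcb_config(0, DCB_ENABLED, ETH_4_TCS, 1) == 0)
		start_port(0);
#endif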
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			"rte_zmalloc(%d struct rte_port) failed\n",
			RTE_MAX_ETHPORTS);
	/* Initialize the ports' NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
	/* Clear the screen and move to the top left */
	printf("%s%s", clr, top_left);
	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
signal_handler(int signum)
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
#ifdef RTE_LIBRTE_PDUMP
		/* Uninitialize the packet capture framework */
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
		/* Set the flag to indicate forced termination */
		/* Re-raise the signal to exit with the conventional status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
main(int argc, char **argv)
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);
	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
	diag = rte_eal_init(argc, argv);
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			rte_strerror(rte_errno));
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		rte_exit(EXIT_FAILURE,
			"Secondary process type not supported.\n");
	ret = register_eth_event_callback();
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
#ifdef RTE_LIBRTE_PDUMP
	/* Initialize the packet capture framework */
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
	nb_ports = (portid_t) count;
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
	/* Allocate the port structures and initialize them */
	set_def_fwd_config();
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			"Check the core mask argument\n");
	/* Bitrate/latency stats are disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
	/* On FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	launch_args_parse(argc, argv);
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
			"interactive mode.\n");
	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
			"using tx_first. Disabling.\n");
	if (!nb_rxq && !nb_txq)
		printf("Warning: either the Rx or the Tx queue count should be non-zero\n");
	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
			"but nb_txq=%d will prevent it from being fully tested.\n",
			nb_rxq, nb_txq);
	ret = rte_dev_hotplug_handle_enable();
			"failed to enable hotplug handling.");
	ret = rte_dev_event_monitor_start();
			"failed to start device event monitoring.");
	ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
			"failed to register the device event callback\n");
	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");
	/* Set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
			printf("Error enabling promiscuous mode for port %u: %s - ignoring\n",
				port_id, rte_strerror(-ret));
	/* Initialize the metrics library */
	rte_metrics_init(rte_socket_id());
#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
			printf("Warning: latencystats init() returned error %d\n",
				ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	/* Set up bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);
	if (interactive == 1) {
		printf("Start automatic packet forwarding\n");
		start_packet_forwarding(0);
		printf("No command-line core given, starting packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;
			/* Convert to a number of cycles */
			timer_period = stats_period * rte_get_timer_hz();
			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;
				if (diff_time >= timer_period) {
					/* Reset the timer */
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
	ret = rte_eal_cleanup();
		rte_exit(EXIT_FAILURE,
			"EAL cleanup failed: %s\n", strerror(-ret));
	return EXIT_SUCCESS;