/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif
#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not set.
 */
uint8_t socket_num = UMA_NO_CONFIG;
/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
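/*
 * For reference, the mode above is normally chosen on the testpmd
 * command line via --mp-alloc (an illustrative invocation, exact EAL
 * arguments will differ per setup):
 *   testpmd -l 0-3 -n 4 -- --mp-alloc=xmem
 */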
/*
 * Store the sockets on which the memory pool used by each port
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the RX ring used by each port
 * is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the TX ring used by each port
 * is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of the peer traffic
 * generator ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams; /**< Equals nb_ports * RTE_MAX(nb_rxq, nb_txq). */
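/*
 * Worked example of the invariants above (illustrative numbers): with
 * 8 enabled lcores (nb_lcores = 8), configuring 4 forwarding cores
 * gives nb_fwd_lcores = 4 <= nb_cfg_lcores <= 8; with 2 forwarding
 * ports and nb_rxq = nb_txq = 2, init_fwd_streams() allocates
 * nb_fwd_streams = 2 * RTE_MAX(2, 2) = 4 streams.
 */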
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	/* ... (basic engines such as &io_fwd_engine elided) ... */
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< Number of mbufs in all pools, if
                                     * specified on the command line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * When running in a container, a process started with the 'stats-period'
 * option cannot easily be terminated. Set a flag to exit the stats-period
 * loop after SIGINT/SIGTERM is received.
 */
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */
/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour.
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
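/*
 * Example: in the default paired topology, 4 ports forward
 * 0 <-> 1 and 2 <-> 3; a chained topology would instead forward
 * 0 -> 1 -> 2 -> 3 -> 0.
 */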
/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;
/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};
/*
 * Display or mask Ethernet events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
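/*
 * Sketch of how the mask is consulted (simplified from the ethdev
 * event callback): an event of type "type" is printed only when its
 * bit is set:
 *   if (event_print_mask & (UINT32_C(1) << type))
 *           printf("... %s event\n", eth_event_desc[type]);
 */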
/*
 * Decide whether all memory is locked for performance.
 */

/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		/**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];
#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/*
 * Hexadecimal bitmask of the RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];

/*
 * Helper function to check if a socket has been discovered already.
 * Returns a positive value if the socket is new, zero if it is
 * already known.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc = 0;
	unsigned int sock_num;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
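/*
 * Worked example for calc_mem_size() (illustrative numbers only):
 * assuming obj_sz = 2176 bytes on 2 MB pages, one page holds
 * 2097152 / 2176 = 963 mbufs, so 131072 mbufs need 137 pages
 * (~274 MB) of mbuf memory, plus the fixed 128 MB header allowance,
 * rounded up to a page multiple.
 */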
static int
pagesz_flags(uint64_t page_sz)
{
	/* As per the mmap() manpage, huge page sizes are encoded as the
	 * log2 of the page size, shifted by MAP_HUGE_SHIFT.
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
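/*
 * Example: for a 2 MB page, rte_log2_u64(2097152) = 21, so
 * pagesz_flags() returns 21 << 26, which matches Linux's
 * MAP_HUGE_2MB encoding.
 */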
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}
struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);
		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}
		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	munmap(addr, mem_sz);

	return -1;
}
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
					memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}
static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
				      memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
}
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
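	/*
	 * Sizing sketch (illustrative, assuming mbuf_sz = 2176 bytes,
	 * already a multiple of the 64-byte cache line): elt_num =
	 * 2097152 / 2176 = 963 buffers per 2 MB zone, so 262144 mbufs
	 * need zone_num = (262144 + 962) / 963 = 273 zones.
	 */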
	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
/*
 * Check whether the given socket ID is valid in NUMA mode.
 * If valid, return 0; else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port ID with the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return max_rxq_valid ? allowed_max_rxq : 0;
}
/*
 * Check whether the input rxq is valid.
 * If the input rxq is not greater than the maximum number of RX
 * queues of every port, it is valid.
 * If valid, return 0; else return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq, allowed_max_rxq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port ID with the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return max_txq_valid ? allowed_max_txq : 0;
}
/*
 * Check whether the input txq is valid.
 * If the input txq is not greater than the maximum number of TX
 * queues of every port, it is valid.
 * If valid, return 0; else return -1.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq, allowed_max_txq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RXDs of every rx queue.
 * *pid returns the port ID with the minimal value of
 * max_rxd across all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_rxd(portid_t *pid)
{
	uint16_t allowed_max_rxd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_rxd;
}
/*
 * Get the allowed minimal number of RXDs of every rx queue.
 * *pid returns the port ID with the highest value of
 * min_rxd across all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_rxd(portid_t *pid)
{
	uint16_t allowed_min_rxd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_rxd;
}
/*
 * Check whether the input rxd is valid.
 * If the input rxd is not greater than the maximum and not less than
 * the minimal number of RXDs of every Rx queue on every port, it is
 * valid. If valid, return 0; else return -1.
 */
int
check_nb_rxd(queueid_t rxd)
{
	uint16_t allowed_max_rxd;
	uint16_t allowed_min_rxd;
	portid_t pid = 0;

	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
	if (rxd > allowed_max_rxd) {
		printf("Fail: input rxd (%u) can't be greater "
		       "than max_rxds (%u) of port %u\n",
		       rxd, allowed_max_rxd, pid);
		return -1;
	}

	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
	if (rxd < allowed_min_rxd) {
		printf("Fail: input rxd (%u) can't be less "
		       "than min_rxds (%u) of port %u\n",
		       rxd, allowed_min_rxd, pid);
		return -1;
	}

	return 0;
}
/*
 * Get the allowed maximum number of TXDs of every tx queue.
 * *pid returns the port ID with the minimal value of
 * max_txd across all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_txd(portid_t *pid)
{
	uint16_t allowed_max_txd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_txd;
}
/*
 * Get the allowed minimal number of TXDs of every tx queue.
 * *pid returns the port ID with the highest value of
 * min_txd across all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_txd(portid_t *pid)
{
	uint16_t allowed_min_txd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_txd;
}
/*
 * Check whether the input txd is valid.
 * If the input txd is not greater than the maximum and not less than
 * the minimal number of TXDs of every Tx queue on every port, it is
 * valid. If valid, return 0; else return -1.
 */
int
check_nb_txd(queueid_t txd)
{
	uint16_t allowed_max_txd;
	uint16_t allowed_min_txd;
	portid_t pid = 0;

	allowed_max_txd = get_allowed_max_nb_txd(&pid);
	if (txd > allowed_max_txd) {
		printf("Fail: input txd (%u) can't be greater "
		       "than max_txds (%u) of port %u\n",
		       txd, allowed_max_txd, pid);
		return -1;
	}

	allowed_min_txd = get_allowed_min_nb_txd(&pid);
	if (txd < allowed_min_txd) {
		printf("Fail: input txd (%u) can't be less "
		       "than min_txds (%u) of port %u\n",
		       txd, allowed_min_txd, pid);
		return -1;
	}

	return 0;
}
/*
 * Get the allowed maximum number of hairpin queues.
 * *pid returns the port ID with the minimal value of
 * max_hairpin_queues among all ports.
 */
queueid_t
get_allowed_max_nb_hairpinq(portid_t *pid)
{
	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
	portid_t pi;
	struct rte_eth_hairpin_cap cap;

	RTE_ETH_FOREACH_DEV(pi) {
		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
			*pid = pi;
			return 0;
		}
		if (cap.max_nb_queues < allowed_max_hairpinq) {
			allowed_max_hairpinq = cap.max_nb_queues;
			*pid = pi;
		}
	}
	return allowed_max_hairpinq;
}
/*
 * Check whether the input hairpin queue count is valid.
 * If it is not greater than the maximum number of hairpin queues of
 * every port, it is valid. If valid, return 0; else return -1.
 */
int
check_nb_hairpinq(queueid_t hairpinq)
{
	queueid_t allowed_max_hairpinq;
	portid_t pid = 0;

	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
	if (hairpinq > allowed_max_hairpinq) {
		printf("Fail: input hairpin (%u) can't be greater "
		       "than max_hairpin_queues (%u) of port %u\n",
		       hairpinq, allowed_max_hairpinq, pid);
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	uint16_t data_size;
	bool warning = 0;
	int k;
	int ret;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_info_get() failed\n");

		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;

		/* Check for maximum number of segments per MTU.
		 * Accordingly update the mbuf data size.
		 */
		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
		    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = rx_mode.max_rx_pkt_len /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;

			if ((data_size + RTE_PKTMBUF_HEADROOM) >
							mbuf_data_size) {
				mbuf_data_size = data_size +
						 RTE_PKTMBUF_HEADROOM;
				warning = 1;
			}
		}
	}

	if (warning)
		TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
			    mbuf_data_size);
	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
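	/*
	 * Rough feel for the default sizing above (using the usual
	 * testpmd defaults, e.g. 2048-entry descriptor maxima, a
	 * 250-mbuf cache and 512-packet bursts): with 4 lcores the
	 * per-port estimate is about 2048 + 4 * 250 + 2048 + 512 =
	 * 5608 mbufs, multiplied by RTE_MAX_ETHPORTS for the worst case.
	 */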
	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mempools[i] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool,
						       socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mempools[0] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool, 0);
		else
			mempools[socket_num] = mbuf_pool_create
						(mbuf_data_size,
						 nb_mbuf_per_pool,
						 socket_num);
	}

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Record which mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}
	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	/* create a GRO context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif
}
static void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;
	int ret;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];

	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
	if (ret != 0)
		return;

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
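/*
 * Example of a line produced by pkt_burst_stats_display() (made-up
 * numbers): "RX-bursts : 1024 [75% of 32 pkts + 25% of others]",
 * i.e. 75% of all Rx bursts contained exactly 32 packets.
 */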
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
		"TX Port=%2d/Queue=%2d %s\n",
		fwd_top_stats_border, fs->rx_port, fs->rx_queue,
		fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
	       " TX-dropped: %-14"PRIu64,
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
		       " RX- bad L4 checksum: %-14"PRIu64
		       " RX- bad outer L4 checksum: %-14"PRIu64"\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);
	} else {
		printf("\n");
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles = 0;
#endif
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles += fs->core_cycles;
#endif
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		uint8_t j;

		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		if (!port->rx_queue_stats_mapping_enabled &&
		    !port->tx_queue_stats_mapping_enabled) {
			printf("  RX-packets: %-14"PRIu64
			       " RX-dropped: %-14"PRIu64
			       "RX-total: %-"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum: %-14"PRIu64
				       " Bad-l4csum: %-14"PRIu64
				       "Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if (stats.ierrors + stats.rx_nombuf > 0) {
				printf("  RX-error: %-"PRIu64"\n",
				       stats.ierrors);
				printf("  RX-nombufs: %-14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %-14"PRIu64
			       " TX-dropped: %-14"PRIu64
			       "TX-total: %-"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		} else {
			printf("  RX-packets: %14"PRIu64
			       " RX-dropped:%14"PRIu64
			       " RX-total:%14"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum:%14"PRIu64
				       " Bad-l4csum:%14"PRIu64
				       " Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if ((stats.ierrors + stats.rx_nombuf) > 0) {
				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
				printf("  RX-nombufs: %14"PRIu64"\n",
				       stats.rx_nombuf);
			}

			printf("  TX-packets: %14"PRIu64
			       " TX-dropped:%14"PRIu64
			       " TX-total:%14"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
		}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		if (ports_stats[pt_id].rx_stream)
			pkt_burst_stats_display("RX",
				&ports_stats[pt_id].rx_stream->rx_burst_stats);
		if (ports_stats[pt_id].tx_stream)
			pkt_burst_stats_display("TX",
				&ports_stats[pt_id].tx_stream->tx_burst_stats);
#endif

		if (port->rx_queue_stats_mapping_enabled) {
			printf("\n");
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d RX-packets:%14"PRIu64
				       " RX-errors:%14"PRIu64
				       " RX-bytes:%14"PRIu64"\n",
				       j, stats.q_ipackets[j],
				       stats.q_errors[j], stats.q_ibytes[j]);
			}
			printf("\n");
		}
		if (port->tx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d TX-packets:%14"PRIu64
				       " TX-bytes:%14"PRIu64"\n",
				       j, stats.q_opackets[j],
				       stats.q_obytes[j]);
			}
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
#define CYC_PER_MHZ 1E6
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%.2F (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64") at %"PRIu64
		       " MHz Clock\n",
		       (double) fwd_cycles / total_recv,
		       fwd_cycles, total_recv,
		       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
#endif
}
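/*
 * Reading the cycles figure above (illustrative numbers): with
 * fwd_cycles = 2e9 total cycles spent forwarding total_recv = 1e8
 * packets, the cost is 20 CPU cycles/packet, reported alongside the
 * TSC frequency in MHz.
 */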
void
fwd_stats_reset(void)
{
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		fs->rx_packets = 0;
		fs->tx_packets = 0;
		fs->fwd_dropped = 0;
		fs->rx_bad_ip_csum = 0;
		fs->rx_bad_l4_csum = 0;
		fs->rx_bad_outer_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fs->core_cycles = 0;
#endif
	}
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do/while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer is added to exit
				 * the loop after the 1-second timer expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif
	} while (! fc->stopped);
}
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
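	/*
	 * Pre-setting "stopped" on the copied context makes the do/while
	 * loop in run_pkt_fwd_on_lcore() exit after a single pass, so
	 * exactly one burst is sent on each stream of this core.
	 */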
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	port_fwd_end_t port_fwd_end;
	lcoreid_t lc_id;
	portid_t pt_id;
	int i;

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}

	fwd_stats_display();

	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down fail.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* All ports are started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}
/* Configure the Rx and Tx hairpin queues for the selected port. */
static int
setup_hairpin_queues(portid_t pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];

	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = pi;
		hairpin_conf.peers[0].queue = i + nb_rxq;
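		/*
		 * Mapping sketch (illustrative numbers): with
		 * nb_rxq = nb_txq = 2 and nb_hairpinq = 2, Tx hairpin
		 * queues 2..3 of this port peer with Rx queues 2..3,
		 * and the Rx loop below does the mirror-image binding.
		 */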
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up the Tx hairpin queue; revert and return. */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = pi;
		hairpin_conf.peers[0].queue = i + nb_txq;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up the Rx hairpin queue; revert and return. */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
					"to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
				"queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_hairpin_cap cap;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}
		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;
			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			if (nb_hairpinq > 0 &&
			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
				printf("Port %d doesn't support hairpin "
				       "queues\n", pi);
				return -1;
			}
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
						     nb_txq + nb_hairpinq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
2454 if (port->need_reconfig_queues > 0) {
2455 port->need_reconfig_queues = 0;
2456 /* setup tx queues */
2457 for (qi = 0; qi < nb_txq; qi++) {
2458 if ((numa_support) &&
2459 (txring_numa[pi] != NUMA_NO_CONFIG))
2460 diag = rte_eth_tx_queue_setup(pi, qi,
2461 port->nb_tx_desc[qi],
2462 txring_numa[pi],
2463 &(port->tx_conf[qi]));
2465 diag = rte_eth_tx_queue_setup(pi, qi,
2466 port->nb_tx_desc[qi],
2467 port->socket_id,
2468 &(port->tx_conf[qi]));
2470 if (diag == 0)
2471 continue;
2473 /* Failed to set up TX queue, return */
2474 if (rte_atomic16_cmpset(&(port->port_status),
2475 RTE_PORT_HANDLING,
2476 RTE_PORT_STOPPED) == 0)
2477 printf("Port %d cannot be set back "
2478 "to stopped\n", pi);
2479 printf("Failed to configure port %d tx queues\n",
2480 pi);
2481 /* try to reconfigure queues next time */
2482 port->need_reconfig_queues = 1;
2483 return -1;
2484 }
2485 for (qi = 0; qi < nb_rxq; qi++) {
2486 /* setup rx queues */
2487 if ((numa_support) &&
2488 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2489 struct rte_mempool *mp =
2490 mbuf_pool_find(rxring_numa[pi]);
2491 if (mp == NULL) {
2492 printf("Failed to setup RX queue: "
2493 "no mempool allocated "
2494 "on socket %d\n",
2495 rxring_numa[pi]);
2496 return -1;
2497 }
2499 diag = rte_eth_rx_queue_setup(pi, qi,
2500 port->nb_rx_desc[qi],
2501 rxring_numa[pi],
2502 &(port->rx_conf[qi]),
2503 mp);
2504 } else {
2505 struct rte_mempool *mp =
2506 mbuf_pool_find(port->socket_id);
2507 if (mp == NULL) {
2508 printf("Failed to setup RX queue: "
2509 "no mempool allocated "
2510 "on socket %d\n",
2511 port->socket_id);
2512 return -1;
2513 }
2514 diag = rte_eth_rx_queue_setup(pi, qi,
2515 port->nb_rx_desc[qi],
2516 port->socket_id,
2517 &(port->rx_conf[qi]),
2518 mp);
2519 }
2520 if (diag == 0)
2521 continue;
2523 /* Failed to set up RX queue, return */
2524 if (rte_atomic16_cmpset(&(port->port_status),
2525 RTE_PORT_HANDLING,
2526 RTE_PORT_STOPPED) == 0)
2527 printf("Port %d cannot be set back "
2528 "to stopped\n", pi);
2529 printf("Failed to configure port %d rx queues\n",
2530 pi);
2531 /* try to reconfigure queues next time */
2532 port->need_reconfig_queues = 1;
2533 return -1;
2534 }
2535 /* setup hairpin queues */
2536 if (setup_hairpin_queues(pi) != 0)
2537 return -1;
2538 }
2539 configure_rxtx_dump_callbacks(verbose_level);
2541 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2542 NULL, 0);
2543 if (diag < 0)
2544 printf(
2545 "Port %d: Failed to disable Ptype parsing\n",
2546 pi);
2550 if (rte_eth_dev_start(pi) < 0) {
2551 printf("Failed to start port %d\n", pi);
2553 /* Failed to start port, set it back to stopped */
2554 if (rte_atomic16_cmpset(&(port->port_status),
2555 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2556 printf("Port %d cannot be set back to "
2557 "stopped\n", pi);
2558 continue;
2559 }
2561 if (rte_atomic16_cmpset(&(port->port_status),
2562 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2563 printf("Port %d cannot be set to started\n", pi);
2565 if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2566 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2567 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2568 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2569 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2571 /* at least one port started, need checking link status */
2572 need_check_link_status = 1;
2575 if (need_check_link_status == 1 && !no_link_check)
2576 check_all_ports_link_status(RTE_PORT_ALL);
2577 else if (need_check_link_status == 0)
2578 printf("Please stop the ports first\n");
2580 printf("Done\n");
2581 return 0;
2582 }
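/*
 * Editor's note: an illustrative sketch, not part of the original testpmd
 * source. It condenses the configure / queue-setup / start sequence that
 * start_port() drives above into the single-port, single-queue case.
 * mp is an mbuf pool created beforehand; the function name and the
 * descriptor count of 512 are hypothetical.
 */
static __rte_unused int
example_minimal_port_start(portid_t pid, struct rte_mempool *mp)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	ret = rte_eth_dev_configure(pid, 1, 1, &conf);	/* 1 RX, 1 TX queue */
	if (ret != 0)
		return ret;
	ret = rte_eth_rx_queue_setup(pid, 0, 512,
			rte_eth_dev_socket_id(pid), NULL, mp);
	if (ret != 0)
		return ret;
	ret = rte_eth_tx_queue_setup(pid, 0, 512,
			rte_eth_dev_socket_id(pid), NULL);
	if (ret != 0)
		return ret;
	return rte_eth_dev_start(pid);
}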
2585 stop_port(portid_t pid)
2588 struct rte_port *port;
2589 int need_check_link_status = 0;
2596 if (port_id_is_invalid(pid, ENABLED_WARN))
2599 printf("Stopping ports...\n");
2601 RTE_ETH_FOREACH_DEV(pi) {
2602 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2605 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2606 printf("Please remove port %d from forwarding configuration.\n", pi);
2610 if (port_is_bonding_slave(pi)) {
2611 printf("Please remove port %d from bonded device.\n", pi);
2616 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2617 RTE_PORT_HANDLING) == 0)
2618 continue;
2620 rte_eth_dev_stop(pi);
2622 if (rte_atomic16_cmpset(&(port->port_status),
2623 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2624 printf("Port %d cannot be set to stopped\n", pi);
2625 need_check_link_status = 1;
2627 if (need_check_link_status && !no_link_check)
2628 check_all_ports_link_status(RTE_PORT_ALL);
2630 printf("Done\n");
2631 }
2634 remove_invalid_ports_in(portid_t *array, portid_t *total)
2637 portid_t new_total = 0;
2639 for (i = 0; i < *total; i++)
2640 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2641 array[new_total] = array[i];
2642 new_total++;
2643 }
2644 *total = new_total;
2645 }
2648 remove_invalid_ports(void)
2650 remove_invalid_ports_in(ports_ids, &nb_ports);
2651 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2652 nb_cfg_ports = nb_fwd_ports;
2656 close_port(portid_t pid)
2659 struct rte_port *port;
2661 if (port_id_is_invalid(pid, ENABLED_WARN))
2664 printf("Closing ports...\n");
2666 RTE_ETH_FOREACH_DEV(pi) {
2667 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2670 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2671 printf("Please remove port %d from forwarding configuration.\n", pi);
2675 if (port_is_bonding_slave(pi)) {
2676 printf("Please remove port %d from bonded device.\n", pi);
2681 if (rte_atomic16_cmpset(&(port->port_status),
2682 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2683 printf("Port %d is already closed\n", pi);
2687 if (rte_atomic16_cmpset(&(port->port_status),
2688 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2689 printf("Port %d is not stopped\n", pi);
2690 continue;
2691 }
2693 if (port->flow_list)
2694 port_flow_flush(pi);
2695 rte_eth_dev_close(pi);
2697 remove_invalid_ports();
2699 if (rte_atomic16_cmpset(&(port->port_status),
2700 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2701 printf("Port %d cannot be set to closed\n", pi);
2708 reset_port(portid_t pid)
2712 struct rte_port *port;
2714 if (port_id_is_invalid(pid, ENABLED_WARN))
2717 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2718 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2719 printf("Cannot reset port(s), please stop port(s) first.\n");
2723 printf("Resetting ports...\n");
2725 RTE_ETH_FOREACH_DEV(pi) {
2726 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2729 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2730 printf("Please remove port %d from forwarding "
2731 "configuration.\n", pi);
2735 if (port_is_bonding_slave(pi)) {
2736 printf("Please remove port %d from bonded device.\n",
2737 pi);
2738 continue;
2739 }
2741 diag = rte_eth_dev_reset(pi);
2742 if (diag == 0) {
2743 port = &ports[pi];
2744 port->need_reconfig = 1;
2745 port->need_reconfig_queues = 1;
2746 } else
2747 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2755 attach_port(char *identifier)
2758 struct rte_dev_iterator iterator;
2760 printf("Attaching a new port...\n");
2762 if (identifier == NULL) {
2763 printf("A device identifier must be specified\n");
2764 return;
2765 }
2767 if (rte_dev_probe(identifier) < 0) {
2768 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2772 /* first attach mode: event */
2773 if (setup_on_probe_event) {
2774 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2775 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2776 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2777 ports[pi].need_setup != 0)
2778 setup_attached_port(pi);
2779 return;
2780 }
2782 /* second attach mode: iterator */
2783 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2784 /* setup ports matching the devargs used for probing */
2785 if (port_is_forwarding(pi))
2786 continue; /* port was already attached before */
2787 setup_attached_port(pi);
2788 }
2789 }
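/*
 * Editor's note: an illustrative sketch, not part of the original testpmd
 * source. attach_port() above accepts any EAL device identifier; a caller
 * would hand it a devargs string such as the hypothetical PCI address
 * below, after which the new port is set up either on the
 * RTE_ETH_EVENT_NEW event or through the device iterator, as shown above.
 */
static __rte_unused void
example_attach_by_devargs(void)
{
	char devargs[] = "0000:03:00.0";	/* hypothetical PCI address */

	attach_port(devargs);
}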
2792 setup_attached_port(portid_t pi)
2794 unsigned int socket_id;
2797 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2798 /* if socket_id is invalid, set to the first available socket. */
2799 if (check_socket_id(socket_id) < 0)
2800 socket_id = socket_ids[0];
2801 reconfig(pi, socket_id);
2802 ret = rte_eth_promiscuous_enable(pi);
2803 if (ret != 0)
2804 printf("Failed to enable promiscuous mode for port %u: %s - ignoring\n",
2805 pi, rte_strerror(-ret));
2807 ports_ids[nb_ports++] = pi;
2808 fwd_ports_ids[nb_fwd_ports++] = pi;
2809 nb_cfg_ports = nb_fwd_ports;
2810 ports[pi].need_setup = 0;
2811 ports[pi].port_status = RTE_PORT_STOPPED;
2813 printf("Port %d is attached. Total ports is now %d\n", pi, nb_ports);
2818 detach_device(struct rte_device *dev)
2822 if (dev == NULL) {
2823 printf("Device already removed\n");
2824 return;
2825 }
2827 printf("Removing a device...\n");
2829 if (rte_dev_remove(dev) < 0) {
2830 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2833 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2834 /* reset mapping between old ports and removed device */
2835 rte_eth_devices[sibling].device = NULL;
2836 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2837 /* sibling ports are forced to be closed */
2838 ports[sibling].port_status = RTE_PORT_CLOSED;
2839 printf("Port %u is closed\n", sibling);
2843 remove_invalid_ports();
2845 printf("Device is detached\n");
2846 printf("Total ports is now %d\n", nb_ports);
2852 detach_port_device(portid_t port_id)
2854 if (port_id_is_invalid(port_id, ENABLED_WARN))
2857 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2858 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2859 printf("Port not stopped\n");
2860 return;
2861 }
2862 printf("Port was not closed\n");
2863 if (ports[port_id].flow_list)
2864 port_flow_flush(port_id);
2867 detach_device(rte_eth_devices[port_id].device);
2871 detach_devargs(char *identifier)
2873 struct rte_dev_iterator iterator;
2874 struct rte_devargs da;
2877 printf("Removing a device...\n");
2879 memset(&da, 0, sizeof(da));
2880 if (rte_devargs_parsef(&da, "%s", identifier)) {
2881 printf("cannot parse identifier\n");
2882 return;
2883 }
2887 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
2888 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2889 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2890 printf("Port %u not stopped\n", port_id);
2891 rte_eth_iterator_cleanup(&iterator);
2892 return;
2893 }
2895 /* sibling ports are forced to be closed */
2896 if (ports[port_id].flow_list)
2897 port_flow_flush(port_id);
2898 ports[port_id].port_status = RTE_PORT_CLOSED;
2899 printf("Port %u is now closed\n", port_id);
2903 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
2904 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
2905 da.name, da.bus->name);
2909 remove_invalid_ports();
2911 printf("Device %s is detached\n", identifier);
2912 printf("Total ports is now %d\n", nb_ports);
2924 stop_packet_forwarding();
2926 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2927 if (mempools[i]) {
2928 if (mp_alloc_type == MP_ALLOC_ANON)
2929 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
2930 NULL);
2931 }
2932 }
2933 if (ports != NULL) {
2934 no_link_check = 1;
2935 RTE_ETH_FOREACH_DEV(pt_id) {
2936 printf("\nStopping port %d...\n", pt_id);
2937 fflush(stdout);
2938 stop_port(pt_id);
2939 }
2940 RTE_ETH_FOREACH_DEV(pt_id) {
2941 printf("\nShutting down port %d...\n", pt_id);
2942 fflush(stdout);
2943 close_port(pt_id);
2944 }
2945 }
2947 if (hot_plug) {
2948 ret = rte_dev_event_monitor_stop();
2949 if (ret) {
2950 RTE_LOG(ERR, EAL,
2951 "failed to stop device event monitor.");
2952 return;
2953 }
2955 ret = rte_dev_event_callback_unregister(NULL,
2956 dev_event_callback, NULL);
2957 if (ret < 0) {
2958 RTE_LOG(ERR, EAL,
2959 "failed to unregister device event callback.\n");
2960 return;
2961 }
2963 ret = rte_dev_hotplug_handle_disable();
2964 if (ret) {
2965 RTE_LOG(ERR, EAL,
2966 "failed to disable hotplug handling.\n");
2967 return;
2968 }
2969 }
2970 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2971 if (mempools[i])
2972 rte_mempool_free(mempools[i]);
2973 }
2975 printf("\nBye...\n");
2978 typedef void (*cmd_func_t)(void);
2979 struct pmd_test_command {
2980 const char *cmd_name;
2981 cmd_func_t cmd_func;
2982 };
2984 /* Check the link status of all ports for up to 9 s, then print the final status */
2986 check_all_ports_link_status(uint32_t port_mask)
2988 #define CHECK_INTERVAL 100 /* 100ms */
2989 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2991 uint8_t count, all_ports_up, print_flag = 0;
2992 struct rte_eth_link link;
2993 int ret;
2995 printf("Checking link statuses...\n");
2997 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2998 all_ports_up = 1;
2999 RTE_ETH_FOREACH_DEV(portid) {
3000 if ((port_mask & (1 << portid)) == 0)
3001 continue;
3002 memset(&link, 0, sizeof(link));
3003 ret = rte_eth_link_get_nowait(portid, &link);
3004 if (ret < 0) {
3005 all_ports_up = 0;
3006 if (print_flag == 1)
3007 printf("Port %u link get failed: %s\n",
3008 portid, rte_strerror(-ret));
3009 continue;
3010 }
3011 /* print link status if flag set */
3012 if (print_flag == 1) {
3013 if (link.link_status)
3014 printf(
3015 "Port %d Link Up. Speed %u Mbps - %s\n",
3016 portid, link.link_speed,
3017 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
3018 ("full-duplex") : ("half-duplex"));
3019 else
3020 printf("Port %d Link Down\n", portid);
3021 continue;
3022 }
3023 /* clear all_ports_up flag if any link down */
3024 if (link.link_status == ETH_LINK_DOWN) {
3025 all_ports_up = 0;
3026 break;
3027 }
3028 }
3029 /* after finally printing all link status, get out */
3030 if (print_flag == 1)
3031 break;
3033 if (all_ports_up == 0) {
3034 fflush(stdout);
3035 rte_delay_ms(CHECK_INTERVAL);
3036 }
3038 /* set the print_flag if all ports up or timeout */
3039 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3040 print_flag = 1;
3041 }
3042 }
3043 }
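/*
 * Editor's note: an illustrative sketch, not part of the original testpmd
 * source. check_all_ports_link_status() above polls every selected port;
 * this is the single-port equivalent, waiting up to timeout_ms for link
 * up. The function name is hypothetical.
 */
static __rte_unused int
example_wait_link_up(portid_t pid, unsigned int timeout_ms)
{
	struct rte_eth_link link;
	unsigned int waited;

	for (waited = 0; waited < timeout_ms; waited += 100) {
		memset(&link, 0, sizeof(link));
		if (rte_eth_link_get_nowait(pid, &link) < 0)
			return -1;	/* link query failed */
		if (link.link_status == ETH_LINK_UP)
			return 0;	/* link came up */
		rte_delay_ms(100);	/* poll every 100 ms */
	}
	return -1;	/* timed out */
}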
3049 * This callback removes a single port of a device. It is limited: it
3050 * cannot handle the removal of multiple ports from one device.
3051 * TODO: the device detach invocation is planned to move from the user side
3052 * into the EAL, and all PMDs should free port resources on ether device close.
3055 rmv_port_callback(void *arg)
3057 int need_to_start = 0;
3058 int org_no_link_check = no_link_check;
3059 portid_t port_id = (intptr_t)arg;
3060 struct rte_device *dev;
3062 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3064 if (!test_done && port_is_forwarding(port_id)) {
3065 need_to_start = 1;
3066 stop_packet_forwarding();
3067 }
3068 no_link_check = 1;
3069 stop_port(port_id);
3070 no_link_check = org_no_link_check;
3072 /* Save rte_device pointer before closing ethdev port */
3073 dev = rte_eth_devices[port_id].device;
3074 close_port(port_id);
3075 detach_device(dev); /* might be already removed or have more ports */
3077 if (need_to_start)
3078 start_packet_forwarding(0);
3079 }
3081 /* This function is used by the interrupt thread */
3083 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3084 void *ret_param)
3085 {
3086 RTE_SET_USED(param);
3087 RTE_SET_USED(ret_param);
3089 if (type >= RTE_ETH_EVENT_MAX) {
3090 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3091 port_id, __func__, type);
3093 } else if (event_print_mask & (UINT32_C(1) << type)) {
3094 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3095 eth_event_desc[type]);
3096 fflush(stdout);
3097 }
3099 switch (type) {
3100 case RTE_ETH_EVENT_NEW:
3101 ports[port_id].need_setup = 1;
3102 ports[port_id].port_status = RTE_PORT_HANDLING;
3103 break;
3104 case RTE_ETH_EVENT_INTR_RMV:
3105 if (port_id_is_invalid(port_id, DISABLED_WARN))
3106 break;
3107 if (rte_eal_alarm_set(100000,
3108 rmv_port_callback, (void *)(intptr_t)port_id))
3109 fprintf(stderr, "Could not set up deferred device removal\n");
3110 break;
3111 default:
3112 break;
3113 }
3114 }
3118 register_eth_event_callback(void)
3121 enum rte_eth_event_type event;
3123 for (event = RTE_ETH_EVENT_UNKNOWN;
3124 event < RTE_ETH_EVENT_MAX; event++) {
3125 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3126 event, eth_event_callback, NULL);
3129 if (ret != 0) {
3130 TESTPMD_LOG(ERR, "Failed to register callback for "
3131 "%s event\n", eth_event_desc[event]);
3132 return -1;
3133 }
3134 }
3136 return 0;
3137 }
3139 /* This function is used by the interrupt thread */
3141 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3142 __rte_unused void *arg)
3143 {
3144 uint16_t port_id;
3145 int ret;
3147 if (type >= RTE_DEV_EVENT_MAX) {
3148 fprintf(stderr, "%s called upon invalid event %d\n",
3149 __func__, type);
3150 fflush(stderr);
3151 }
3153 switch (type) {
3154 case RTE_DEV_EVENT_REMOVE:
3155 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3156 device_name);
3157 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3158 if (ret) {
3159 RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
3160 device_name);
3161 return;
3162 }
3164 * Because the user's callback is invoked from the EAL interrupt
3165 * callback, the interrupt callback must finish before it can be
3166 * unregistered when the device is detached. So finish the callback
3167 * quickly and defer the actual detach with an alarm; this is a
3168 * workaround. Once device detaching is moved into the EAL, the
3169 * deferred removal can be replaced.
3172 if (rte_eal_alarm_set(100000,
3173 rmv_port_callback, (void *)(intptr_t)port_id))
3175 "Could not set up deferred device removal\n");
3177 case RTE_DEV_EVENT_ADD:
3178 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3179 device_name);
3180 /* TODO: after kernel driver binding finishes,
3181 * begin attaching the port.
3182 */
3183 break;
3184 default:
3185 break;
3186 }
3187 }
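/*
 * Editor's note: an illustrative sketch, not part of the original testpmd
 * source. As the comment in dev_event_callback() explains, the device
 * cannot be detached from inside the interrupt callback, so the work is
 * deferred with an EAL alarm. This is the generic shape of that pattern;
 * both function names are hypothetical.
 */
static __rte_unused void
example_deferred_work(void *arg)
{
	portid_t port_id = (intptr_t)arg;

	/* Runs later, after the triggering interrupt callback returned. */
	printf("deferred work for port %u\n", (unsigned int)port_id);
}

static __rte_unused void
example_schedule_deferred_work(portid_t port_id)
{
	/* 100000 us: the same 100 ms delay dev_event_callback() uses. */
	if (rte_eal_alarm_set(100000, example_deferred_work,
			(void *)(intptr_t)port_id) != 0)
		fprintf(stderr, "Could not schedule deferred work\n");
}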
3190 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3194 uint8_t mapping_found = 0;
3196 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3197 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3198 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
3199 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
3200 tx_queue_stats_mappings[i].queue_id,
3201 tx_queue_stats_mappings[i].stats_counter_id);
3202 if (diag != 0)
3203 return diag;
3204 mapping_found = 1;
3205 }
3206 }
3207 if (mapping_found)
3208 port->tx_queue_stats_mapping_enabled = 1;
3209 return 0;
3210 }
3213 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3217 uint8_t mapping_found = 0;
3219 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3220 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3221 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
3222 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
3223 rx_queue_stats_mappings[i].queue_id,
3224 rx_queue_stats_mappings[i].stats_counter_id);
3225 if (diag != 0)
3226 return diag;
3227 mapping_found = 1;
3228 }
3229 }
3230 if (mapping_found)
3231 port->rx_queue_stats_mapping_enabled = 1;
3232 return 0;
3233 }
3236 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
3240 diag = set_tx_queue_stats_mapping_registers(pi, port);
3241 if (diag != 0) {
3242 if (diag == -ENOTSUP) {
3243 port->tx_queue_stats_mapping_enabled = 0;
3244 printf("TX queue stats mapping not supported on port id=%d\n", pi);
3245 }
3246 else
3247 rte_exit(EXIT_FAILURE,
3248 "set_tx_queue_stats_mapping_registers "
3249 "failed for port id=%d diag=%d\n",
3250 pi, diag);
3251 }
3253 diag = set_rx_queue_stats_mapping_registers(pi, port);
3254 if (diag != 0) {
3255 if (diag == -ENOTSUP) {
3256 port->rx_queue_stats_mapping_enabled = 0;
3257 printf("RX queue stats mapping not supported on port id=%d\n", pi);
3258 }
3259 else
3260 rte_exit(EXIT_FAILURE,
3261 "set_rx_queue_stats_mapping_registers "
3262 "failed for port id=%d diag=%d\n",
3263 pi, diag);
3264 }
3265 }
3268 rxtx_port_config(struct rte_port *port)
3273 for (qid = 0; qid < nb_rxq; qid++) {
3274 offloads = port->rx_conf[qid].offloads;
3275 port->rx_conf[qid] = port->dev_info.default_rxconf;
3276 if (offloads != 0)
3277 port->rx_conf[qid].offloads = offloads;
3279 /* Check if any Rx parameters have been passed */
3280 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3281 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3283 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3284 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3286 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3287 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3289 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3290 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3292 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3293 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3295 port->nb_rx_desc[qid] = nb_rxd;
3298 for (qid = 0; qid < nb_txq; qid++) {
3299 offloads = port->tx_conf[qid].offloads;
3300 port->tx_conf[qid] = port->dev_info.default_txconf;
3301 if (offloads != 0)
3302 port->tx_conf[qid].offloads = offloads;
3304 /* Check if any Tx parameters have been passed */
3305 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3306 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3308 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3309 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3311 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3312 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3314 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3315 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3317 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3318 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3320 port->nb_tx_desc[qid] = nb_txd;
3321 }
3322 }
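/*
 * Editor's note: an illustrative sketch, not part of the original testpmd
 * source. rxtx_port_config() above starts from the PMD's default_rxconf
 * and overrides only the fields given on the command line; a standalone
 * application can apply the same pattern for one queue. The threshold and
 * descriptor values are hypothetical.
 */
static __rte_unused int
example_rx_queue_with_thresholds(portid_t pid, queueid_t qid,
				 struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int ret;

	ret = rte_eth_dev_info_get(pid, &dev_info);
	if (ret != 0)
		return ret;
	rxconf = dev_info.default_rxconf;	/* start from PMD defaults */
	rxconf.rx_free_thresh = 32;		/* then override selectively */
	return rte_eth_rx_queue_setup(pid, qid, 512,
			rte_eth_dev_socket_id(pid), &rxconf, mp);
}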
3325 init_port_config(void)
3328 struct rte_port *port;
3331 RTE_ETH_FOREACH_DEV(pid) {
3332 port = &ports[pid];
3333 port->dev_conf.fdir_conf = fdir_conf;
3335 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3336 if (ret != 0)
3337 return;
3339 if (nb_rxq > 1) {
3340 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3341 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3342 rss_hf & port->dev_info.flow_type_rss_offloads;
3343 } else {
3344 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3345 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3346 }
3348 if (port->dcb_flag == 0) {
3349 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3350 port->dev_conf.rxmode.mq_mode =
3351 (enum rte_eth_rx_mq_mode)
3352 (rx_mq_mode & ETH_MQ_RX_RSS);
3354 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3357 rxtx_port_config(port);
3359 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3360 if (ret != 0)
3361 return;
3363 map_port_queue_stats_mapping_registers(pid, port);
3364 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3365 rte_pmd_ixgbe_bypass_init(pid);
3366 #endif
3368 if (lsc_interrupt &&
3369 (rte_eth_devices[pid].data->dev_flags &
3370 RTE_ETH_DEV_INTR_LSC))
3371 port->dev_conf.intr_conf.lsc = 1;
3372 if (rmv_interrupt &&
3373 (rte_eth_devices[pid].data->dev_flags &
3374 RTE_ETH_DEV_INTR_RMV))
3375 port->dev_conf.intr_conf.rmv = 1;
3376 }
3377 }
3379 void set_port_slave_flag(portid_t slave_pid)
3381 struct rte_port *port;
3383 port = &ports[slave_pid];
3384 port->slave_flag = 1;
3387 void clear_port_slave_flag(portid_t slave_pid)
3389 struct rte_port *port;
3391 port = &ports[slave_pid];
3392 port->slave_flag = 0;
3395 uint8_t port_is_bonding_slave(portid_t slave_pid)
3397 struct rte_port *port;
3399 port = &ports[slave_pid];
3400 if ((rte_eth_devices[slave_pid].data->dev_flags &
3401 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3402 return 1;
3403 return 0;
3404 }
3406 const uint16_t vlan_tags[] = {
3407 0, 1, 2, 3, 4, 5, 6, 7,
3408 8, 9, 10, 11, 12, 13, 14, 15,
3409 16, 17, 18, 19, 20, 21, 22, 23,
3410 24, 25, 26, 27, 28, 29, 30, 31
3411 };
3414 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3415 enum dcb_mode_enable dcb_mode,
3416 enum rte_eth_nb_tcs num_tcs,
3417 uint8_t pfc_en)
3418 {
3419 uint8_t i;
3420 int32_t rc;
3421 struct rte_eth_rss_conf rss_conf;
3424 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3425 * given above, and the number of traffic classes available for use.
3427 if (dcb_mode == DCB_VT_ENABLED) {
3428 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3429 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3430 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3431 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3433 /* VMDQ+DCB RX and TX configurations */
3434 vmdq_rx_conf->enable_default_pool = 0;
3435 vmdq_rx_conf->default_pool = 0;
3436 vmdq_rx_conf->nb_queue_pools =
3437 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3438 vmdq_tx_conf->nb_queue_pools =
3439 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3441 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3442 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3443 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3444 vmdq_rx_conf->pool_map[i].pools =
3445 1 << (i % vmdq_rx_conf->nb_queue_pools);
3447 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3448 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3449 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3452 /* set DCB mode of RX and TX of multiple queues */
3453 eth_conf->rxmode.mq_mode =
3454 (enum rte_eth_rx_mq_mode)
3455 (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3456 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3458 struct rte_eth_dcb_rx_conf *rx_conf =
3459 &eth_conf->rx_adv_conf.dcb_rx_conf;
3460 struct rte_eth_dcb_tx_conf *tx_conf =
3461 &eth_conf->tx_adv_conf.dcb_tx_conf;
3463 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3465 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3466 if (rc != 0)
3467 return rc;
3469 rx_conf->nb_tcs = num_tcs;
3470 tx_conf->nb_tcs = num_tcs;
3472 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3473 rx_conf->dcb_tc[i] = i % num_tcs;
3474 tx_conf->dcb_tc[i] = i % num_tcs;
3477 eth_conf->rxmode.mq_mode =
3478 (enum rte_eth_rx_mq_mode)
3479 (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3480 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3481 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3482 }
3484 if (pfc_en)
3485 eth_conf->dcb_capability_en =
3486 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3487 else
3488 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3490 return 0;
3491 }
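/*
 * Editor's note: an illustrative sketch, not part of the original testpmd
 * source. It shows how the builder above is used: fill an rte_eth_conf
 * for plain DCB (no VT) with four traffic classes and priority flow
 * control enabled, then apply it. The queue counts are hypothetical.
 */
static __rte_unused int
example_apply_dcb_conf(portid_t pid)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	ret = get_eth_dcb_conf(pid, &conf, DCB_ENABLED, ETH_4_TCS, 1);
	if (ret != 0)
		return ret;
	/* one RX and one TX queue per traffic class */
	return rte_eth_dev_configure(pid, 4, 4, &conf);
}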
3494 init_port_dcb_config(portid_t pid,
3495 enum dcb_mode_enable dcb_mode,
3496 enum rte_eth_nb_tcs num_tcs,
3497 uint8_t pfc_en)
3498 {
3499 struct rte_eth_conf port_conf;
3500 struct rte_port *rte_port;
3501 int retval;
3502 uint16_t i;
3504 rte_port = &ports[pid];
3506 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3507 /* Enter DCB configuration status */
3508 dcb_config = 1;
3510 port_conf.rxmode = rte_port->dev_conf.rxmode;
3511 port_conf.txmode = rte_port->dev_conf.txmode;
3513 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
3514 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3515 if (retval < 0)
3516 return retval;
3517 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3519 /* re-configure the device. */
3520 retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
3521 if (retval < 0)
3522 return retval;
3524 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3525 if (retval != 0)
3526 return retval;
3528 /* If dev_info.vmdq_pool_base is greater than 0,
3529 * the queue ids of the VMDq pools start after the PF queues.
3530 */
3531 if (dcb_mode == DCB_VT_ENABLED &&
3532 rte_port->dev_info.vmdq_pool_base > 0) {
3533 printf("VMDQ_DCB multi-queue mode is nonsensical"
3534 " for port %d.", pid);
3538 /* Assume the ports in testpmd have the same DCB capability
3539 * and the same number of rxq and txq in DCB mode
3540 */
3541 if (dcb_mode == DCB_VT_ENABLED) {
3542 if (rte_port->dev_info.max_vfs > 0) {
3543 nb_rxq = rte_port->dev_info.nb_rx_queues;
3544 nb_txq = rte_port->dev_info.nb_tx_queues;
3545 } else {
3546 nb_rxq = rte_port->dev_info.max_rx_queues;
3547 nb_txq = rte_port->dev_info.max_tx_queues;
3548 }
3549 } else {
3550 /* if VT is disabled, use all PF queues */
3551 if (rte_port->dev_info.vmdq_pool_base == 0) {
3552 nb_rxq = rte_port->dev_info.max_rx_queues;
3553 nb_txq = rte_port->dev_info.max_tx_queues;
3554 } else {
3555 nb_rxq = (queueid_t)num_tcs;
3556 nb_txq = (queueid_t)num_tcs;
3557 }
3558 }
3560 rx_free_thresh = 64;
3562 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3564 rxtx_port_config(rte_port);
3566 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3567 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3568 rx_vft_set(pid, vlan_tags[i], 1);
3570 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3571 if (retval != 0)
3572 return retval;
3574 map_port_queue_stats_mapping_registers(pid, rte_port);
3576 rte_port->dcb_flag = 1;
3578 return 0;
3579 }
3584 /* Configuration of Ethernet ports. */
3585 ports = rte_zmalloc("testpmd: ports",
3586 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3587 RTE_CACHE_LINE_SIZE);
3588 if (ports == NULL) {
3589 rte_exit(EXIT_FAILURE,
3590 "rte_zmalloc(%d struct rte_port) failed\n",
3594 /* Initialize ports NUMA structures */
3595 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3596 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3597 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3611 const char clr[] = { 27, '[', '2', 'J', '\0' };
3612 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3614 /* Clear screen and move to top left */
3615 printf("%s%s", clr, top_left);
3617 printf("\nPort statistics ====================================");
3618 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3619 nic_stats_display(fwd_ports_ids[i]);
3621 fflush(stdout);
3622 }
3625 signal_handler(int signum)
3627 if (signum == SIGINT || signum == SIGTERM) {
3628 printf("\nSignal %d received, preparing to exit...\n",
3629 signum);
3630 #ifdef RTE_LIBRTE_PDUMP
3631 /* uninitialize packet capture framework */
3632 rte_pdump_uninit();
3633 #endif
3634 #ifdef RTE_LIBRTE_LATENCY_STATS
3635 if (latencystats_enabled != 0)
3636 rte_latencystats_uninit();
3637 #endif
3639 /* Set flag to indicate forced termination. */
3640 f_quit = 1;
3641 /* exit with the expected status */
3642 signal(signum, SIG_DFL);
3643 kill(getpid(), signum);
3644 }
3645 }
3648 main(int argc, char **argv)
3649 {
3650 int diag;
3651 portid_t port_id;
3652 uint16_t count;
3653 int ret;
3655 signal(SIGINT, signal_handler);
3656 signal(SIGTERM, signal_handler);
3658 testpmd_logtype = rte_log_register("testpmd");
3659 if (testpmd_logtype < 0)
3660 rte_exit(EXIT_FAILURE, "Cannot register log type");
3661 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3663 diag = rte_eal_init(argc, argv);
3664 if (diag < 0)
3665 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3666 rte_strerror(rte_errno));
3668 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3669 rte_exit(EXIT_FAILURE,
3670 "Secondary process type not supported.\n");
3672 ret = register_eth_event_callback();
3673 if (ret != 0)
3674 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3676 #ifdef RTE_LIBRTE_PDUMP
3677 /* initialize packet capture framework */
3678 rte_pdump_init();
3679 #endif
3681 count = 0;
3682 RTE_ETH_FOREACH_DEV(port_id) {
3683 ports_ids[count] = port_id;
3684 count++;
3685 }
3686 nb_ports = (portid_t) count;
3687 if (nb_ports == 0)
3688 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3690 /* allocate port structures, and init them */
3691 init_port();
3693 set_def_fwd_config();
3694 if (nb_lcores == 0)
3695 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3696 "Check the core mask argument\n");
3698 /* Bitrate/latency stats disabled by default */
3699 #ifdef RTE_LIBRTE_BITRATE
3700 bitrate_enabled = 0;
3702 #ifdef RTE_LIBRTE_LATENCY_STATS
3703 latencystats_enabled = 0;
3706 /* on FreeBSD, mlockall() is disabled by default */
3707 #ifdef RTE_EXEC_ENV_FREEBSD
3708 do_mlockall = 0;
3709 #else
3710 do_mlockall = 1;
3711 #endif
3713 argc -= diag;
3714 argv += diag;
3715 if (argc > 1)
3716 launch_args_parse(argc, argv);
3718 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3719 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3720 strerror(errno));
3723 if (tx_first && interactive)
3724 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3725 "interactive mode.\n");
3727 if (tx_first && lsc_interrupt) {
3728 printf("Warning: lsc_interrupt needs to be off when "
3729 "using tx_first. Disabling.\n");
3730 lsc_interrupt = 0;
3731 }
3733 if (!nb_rxq && !nb_txq)
3734 printf("Warning: Either rx or tx queues should be non-zero\n");
3736 if (nb_rxq > 1 && nb_rxq > nb_txq)
3737 printf("Warning: nb_rxq=%d enables RSS configuration, "
3738 "but nb_txq=%d will prevent it from being fully tested.\n",
3739 nb_rxq, nb_txq);
3743 if (hot_plug) {
3744 ret = rte_dev_hotplug_handle_enable();
3745 if (ret) {
3746 RTE_LOG(ERR, EAL,
3747 "failed to enable hotplug handling.");
3748 return -1;
3749 }
3751 ret = rte_dev_event_monitor_start();
3752 if (ret) {
3753 RTE_LOG(ERR, EAL,
3754 "failed to start device event monitoring.");
3755 return -1;
3756 }
3758 ret = rte_dev_event_callback_register(NULL,
3759 dev_event_callback, NULL);
3760 if (ret) {
3761 RTE_LOG(ERR, EAL,
3762 "failed to register device event callback\n");
3763 return -1;
3764 }
3765 }
3767 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3768 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3770 /* set all ports to promiscuous mode by default */
3771 RTE_ETH_FOREACH_DEV(port_id) {
3772 ret = rte_eth_promiscuous_enable(port_id);
3774 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3775 port_id, rte_strerror(-ret));
3778 /* Init metrics library */
3779 rte_metrics_init(rte_socket_id());
3781 #ifdef RTE_LIBRTE_LATENCY_STATS
3782 if (latencystats_enabled != 0) {
3783 int ret = rte_latencystats_init(1, NULL);
3784 if (ret)
3785 printf("Warning: latencystats init()"
3786 " returned error %d\n", ret);
3787 printf("Latencystats running on lcore %d\n",
3788 latencystats_lcore_id);
3789 }
3790 #endif
3792 /* Setup bitrate stats */
3793 #ifdef RTE_LIBRTE_BITRATE
3794 if (bitrate_enabled != 0) {
3795 bitrate_data = rte_stats_bitrate_create();
3796 if (bitrate_data == NULL)
3797 rte_exit(EXIT_FAILURE,
3798 "Could not allocate bitrate data.\n");
3799 rte_stats_bitrate_reg(bitrate_data);
3800 }
3801 #endif
3803 #ifdef RTE_LIBRTE_CMDLINE
3804 if (strlen(cmdline_filename) != 0)
3805 cmdline_read_from_file(cmdline_filename);
3807 if (interactive == 1) {
3808 if (auto_start) {
3809 printf("Start automatic packet forwarding\n");
3810 start_packet_forwarding(0);
3811 }
3812 prompt();
3813 pmd_test_exit();
3814 } else
3815 #endif
3816 {
3817 char c;
3818 int rc;
3820 f_quit = 0;
3822 printf("No commandline core given, start packet forwarding\n");
3823 start_packet_forwarding(tx_first);
3824 if (stats_period != 0) {
3825 uint64_t prev_time = 0, cur_time, diff_time = 0;
3826 uint64_t timer_period;
3828 /* Convert to number of cycles */
3829 timer_period = stats_period * rte_get_timer_hz();
3831 while (f_quit == 0) {
3832 cur_time = rte_get_timer_cycles();
3833 diff_time += cur_time - prev_time;
3835 if (diff_time >= timer_period) {
3836 print_stats();
3837 /* Reset the timer */
3838 diff_time = 0;
3839 }
3840 /* Sleep to avoid unnecessary checks */
3841 prev_time = cur_time;
3842 sleep(1);
3843 }
3844 }
3846 printf("Press enter to exit\n");
3847 rc = read(0, &c, 1);
3848 pmd_test_exit();
3849 if (rc < 0)
3850 return 1;
3851 }
3853 ret = rte_eal_cleanup();
3854 if (ret != 0)
3855 rte_exit(EXIT_FAILURE,
3856 "EAL cleanup failed: %s\n", strerror(-ret));
3858 return EXIT_SUCCESS;