/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_pmd_ixgbe.h>
#include <rte_pdump.h>
#include <rte_metrics.h>
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the main core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;
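
/*
 * Illustrative usage of the two knobs above (hedged; exact option names
 * depend on the testpmd release being built):
 *     --no-numa        disable the NUMA-aware allocation described above
 *     --socket-num=1   in UMA mode, allocate all memory from socket 1
 */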
/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
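
/*
 * Illustrative start-up selection of the strategies above (hedged; the
 * accepted keywords may vary between releases):
 *     --mp-alloc=native|anon|xmem|xmemhuge|xbuf
 * maps to MP_ALLOC_NATIVE, MP_ALLOC_ANON, MP_ALLOC_XMEM,
 * MP_ALLOC_XMEM_HUGE and MP_ALLOC_XBUF respectively.
 */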
/*
 * Sockets on which the memory pools used by each port are allocated,
 * as specified per port with --port-numa-config.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Sockets on which the RX rings of each port are allocated,
 * as specified per port with --ring-numa-config.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Sockets on which the TX rings of each port are allocated,
 * as specified per port with --ring-numa-config.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed Ethernet ports. */
portid_t nb_ports;             /**< Number of probed Ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
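
/*
 * Illustrative instance of the invariants above (hedged): starting testpmd
 * with 4 enabled lcores yields nb_lcores = 3 forwarding candidates (the
 * main lcore is excluded in set_default_fwd_lcores_config() below); an
 * interactive command such as "set nb-cores 2" then lowers nb_fwd_lcores
 * to 2 without changing nb_cfg_lcores or nb_lcores.
 */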
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
	&noisy_vnf_engine,
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
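
/*
 * Illustrative run-time tuning of the burst parameters above (hedged; the
 * command syntax follows the documented testpmd CLI but may differ by
 * release): "set burst 16" changes nb_pkt_per_burst, and a command of the
 * form "set burst tx delay 10 retry 3" adjusts burst_tx_delay_time and
 * burst_tx_retry_num for retried transmissions.
 */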
uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
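
/*
 * Illustrative example (hedged): "--mbuf-size=2048,4096" would set
 * mbuf_data_size_n = 2 and mbuf_data_size = {2048, 4096}, creating one
 * mempool per segment size for Rx buffer split.
 */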
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated from the outside. Set a flag to exit the stats-period loop
 * after SIGINT/SIGTERM is received.
 */
uint8_t f_quit;
/*
 * Configuration of packet segments used to scatter received packets
 * if one of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
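
/*
 * Illustrative example (hedged): "set rxpkts 64,1500" requests splitting
 * each received packet into a 64-byte first segment and a 1500-byte second
 * segment, provided the PMD advertises RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT
 * (see rx_queue_setup() below).
 */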
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */
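
/*
 * Illustrative examples (hedged): "set txpkts 64,64" builds two-segment
 * TXONLY packets, and a command of the form "set txtimes <inter>,<intra>"
 * fills tx_pkt_times_inter and tx_pkt_times_intra above when Tx send
 * scheduling is supported by the port.
 */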
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means not in DCB. */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1;  /**< Number of RX queues per port. */
queueid_t nb_txq = 1;  /**< Number of TX queues per port. */
/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour.
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint16_t hairpin_mode;
/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask Ethernet device events.
 * Defaults to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
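
/*
 * Illustrative adjustment of the mask above (hedged; option and event
 * names follow the documented testpmd parameters and may vary by
 * release): "--print-event vf_mbox" adds the one event excluded by
 * default, while "--mask-event <name>" clears the corresponding bit.
 */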
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	/* Default maximum frame length.
	 * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead"
	 * in init_config().
	 */
	.max_rx_pkt_len = 0,
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

/*
 * Measurement of CPU cycles disabled by default.
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default.
 */
uint8_t record_burst_stats;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/*
 * Hexadecimal bitmask of the RX multi-queue modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed.
 */
uint32_t eth_link_speed;
/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
/*
 * Helper function to check if a socket is newly discovered.
 * Returns a positive value if the socket id is new, zero if it is
 * already known.
 */
static int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
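
/*
 * Worked example for calc_mem_size() (hypothetical numbers): with 2M pages
 * and obj_sz = 2560, mbuf_per_pg = 2097152 / 2560 = 819; 100000 mbufs then
 * need ceil(100000 / 819) = 123 pages (~246MB of mbuf memory), plus the
 * fixed 128MB header estimate, rounded up to the page size.
 */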
static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
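
/*
 * Example (hedged against the Linux mmap() ABI): for 2MB pages,
 * rte_log2_u64(2MB) = 21, so pagesz_flags() returns 21 << 26, which
 * matches the kernel's MAP_HUGE_2MB encoding.
 */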
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}
struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */
	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
		    param.len >> 20);

	return 0;
}
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
					memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
	}
}
static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
				      memhdr->len);
		if (ret) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev->data->name);
		}
	}
}
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
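
/*
 * Worked example for the zone math above (hypothetical numbers): with
 * mbuf_sz = 2176, elt_size stays 2176 (already a multiple of the 64-byte
 * cache line), elt_num = 2M / 2176 = 963 buffers per zone, so 100000
 * mbufs require zone_num = ceil(100000 / 963) = 104 memzones.
 */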
/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
/*
 * Check if the given socket id is valid in NUMA mode;
 * return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues in all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return max_rxq_valid ? allowed_max_rxq : 0;
}
/*
 * Check if the input rxq is valid.
 * The input rxq is valid if it is not greater than the maximum number
 * of RX queues supported by every port; return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq, allowed_max_rxq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues in all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return max_txq_valid ? allowed_max_txq : 0;
}
/*
 * Check if the input txq is valid.
 * The input txq is valid if it is not greater than the maximum number
 * of TX queues supported by every port; return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq, allowed_max_txq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RXDs of every rx queue.
 * *pid returns the port id which has the minimal value of
 * max_rxd in all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_rxd(portid_t *pid)
{
	uint16_t allowed_max_rxd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_rxd;
}
/*
 * Get the allowed minimal number of RXDs of every rx queue.
 * *pid returns the port id which has the maximal value of
 * min_rxd in all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_rxd(portid_t *pid)
{
	uint16_t allowed_min_rxd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_rxd;
}
/*
 * Check if the input rxd is valid.
 * The input rxd is valid if it is not greater than the maximum number
 * of RXDs of every Rx queue and not less than the minimal number of
 * RXDs of every Rx queue; return 0 if valid, -1 otherwise.
 */
int
check_nb_rxd(queueid_t rxd)
{
	uint16_t allowed_max_rxd;
	uint16_t allowed_min_rxd;
	portid_t pid = 0;

	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
	if (rxd > allowed_max_rxd) {
		printf("Fail: input rxd (%u) can't be greater "
		       "than max_rxds (%u) of port %u\n",
		       rxd, allowed_max_rxd, pid);
		return -1;
	}

	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
	if (rxd < allowed_min_rxd) {
		printf("Fail: input rxd (%u) can't be less "
		       "than min_rxds (%u) of port %u\n",
		       rxd, allowed_min_rxd, pid);
		return -1;
	}

	return 0;
}
/*
 * Get the allowed maximum number of TXDs of every tx queue.
 * *pid returns the port id which has the minimal value of
 * max_txd in every tx queue.
 */
static uint16_t
get_allowed_max_nb_txd(portid_t *pid)
{
	uint16_t allowed_max_txd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_txd;
}
/*
 * Get the allowed minimal number of TXDs of every tx queue.
 * *pid returns the port id which has the maximal value of
 * min_txd in every tx queue.
 */
static uint16_t
get_allowed_min_nb_txd(portid_t *pid)
{
	uint16_t allowed_min_txd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_txd;
}
/*
 * Check if the input txd is valid.
 * The input txd is valid if it is not greater than the maximum number
 * of TXDs of every Tx queue and not less than the minimal number of
 * TXDs of every Tx queue; return 0 if valid, -1 otherwise.
 */
int
check_nb_txd(queueid_t txd)
{
	uint16_t allowed_max_txd;
	uint16_t allowed_min_txd;
	portid_t pid = 0;

	allowed_max_txd = get_allowed_max_nb_txd(&pid);
	if (txd > allowed_max_txd) {
		printf("Fail: input txd (%u) can't be greater "
		       "than max_txds (%u) of port %u\n",
		       txd, allowed_max_txd, pid);
		return -1;
	}

	allowed_min_txd = get_allowed_min_nb_txd(&pid);
	if (txd < allowed_min_txd) {
		printf("Fail: input txd (%u) can't be less "
		       "than min_txds (%u) of port %u\n",
		       txd, allowed_min_txd, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of hairpin queues.
 * *pid returns the port id which has the minimal value of
 * max_hairpin_queues in all ports.
 */
queueid_t
get_allowed_max_nb_hairpinq(portid_t *pid)
{
	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
	portid_t pi;
	struct rte_eth_hairpin_cap cap;

	RTE_ETH_FOREACH_DEV(pi) {
		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
			*pid = pi;
			return 0;
		}
		if (cap.max_nb_queues < allowed_max_hairpinq) {
			allowed_max_hairpinq = cap.max_nb_queues;
			*pid = pi;
		}
	}
	return allowed_max_hairpinq;
}
/*
 * Check if the input hairpin queue count is valid.
 * It is valid if it is not greater than the maximum number of hairpin
 * queues supported by every port; return 0 if valid, -1 otherwise.
 */
int
check_nb_hairpinq(queueid_t hairpinq)
{
	queueid_t allowed_max_hairpinq;
	portid_t pid = 0;

	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
	if (hairpinq > allowed_max_hairpinq) {
		printf("Fail: input hairpin (%u) can't be greater "
		       "than max_hairpin_queues (%u) of port %u\n",
		       hairpinq, allowed_max_hairpinq, pid);
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	uint16_t data_size;
	bool warning = 0;
	int k;
	int ret;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_info_get() failed\n");

		ret = update_jumbo_frame_offload(pid);
		if (ret != 0)
			printf("Updating jumbo frame offload failed for port %u\n",
			       pid);

		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		if (eth_link_speed)
			port->dev_conf.link_speeds = eth_link_speed;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;

		/*
		 * Check for maximum number of segments per MTU.
		 * Accordingly update the mbuf data size.
		 */
		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
		    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = rx_mode.max_rx_pkt_len /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;

			if ((data_size + RTE_PKTMBUF_HEADROOM) >
			    mbuf_data_size[0]) {
				mbuf_data_size[0] = data_size +
						    RTE_PKTMBUF_HEADROOM;
				warning = 1;
			}
		}
	}
	if (warning)
		TESTPMD_LOG(WARNING,
			    "Configured mbuf size of the first segment %hu\n",
			    mbuf_data_size[0]);
	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i, j;

		for (i = 0; i < num_sockets; i++)
			for (j = 0; j < mbuf_data_size_n; j++)
				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
					mbuf_pool_create(mbuf_data_size[j],
							 nb_mbuf_per_pool,
							 socket_ids[i], j);
	} else {
		uint8_t i;

		for (i = 0; i < mbuf_data_size_n; i++)
			mempools[i] = mbuf_pool_create
					(mbuf_data_size[i],
					 nb_mbuf_per_pool,
					 socket_num == UMA_NO_CONFIG ?
					 0 : socket_num, i);
	}

	init_port_config();
	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);

		if (mbp == NULL)
			mbp = mbuf_pool_find(0, 0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}
	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
				 "rte_gro_ctx_create() failed\n");
		}
	}
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;
	int ret;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];

	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
	if (ret != 0)
		return;

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	uint64_t total_burst, sburst;
	uint64_t nb_burst;
	uint64_t burst_stats[4];
	uint16_t pktnb_stats[4];
	uint16_t nb_pkt;
	int burst_percent[4], sburstp;
	int i;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];
	pktnb_stats[0] = 0;

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];

		if (nb_burst == 0)
			continue;

		total_burst += nb_burst;

		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		if (i == 3) {
			printf("%d%% of other]\n", 100 - sburstp);
			return;
		}

		sburst += burst_stats[i];
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
			       100 - sburstp, (int) pktnb_stats[i]);
			return;
		}

		burst_percent[i] =
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
		       burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
	}
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
	       " TX-dropped: %-14"PRIu64,
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
		       " Rx- bad L4 checksum: %-14"PRIu64
		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);
		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
		       fs->rx_bad_outer_ip_csum);
	} else {
		printf("\n");
	}

	if (record_burst_stats) {
		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
	}
}
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
		uint64_t rx_bad_outer_ip_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
	uint64_t fwd_cycles = 0;
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
				fs->rx_bad_outer_ip_csum;

		if (record_core_cycles)
			fwd_cycles += fs->core_cycles;
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
		       stats.ipackets + stats.imissed);

		if (cur_fwd_eng == &csum_fwd_engine) {
			printf("  Bad-ipcsum: %-14"PRIu64
			       " Bad-l4csum: %-14"PRIu64
			       "Bad-outer-l4csum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_ip_csum,
			       ports_stats[pt_id].rx_bad_l4_csum,
			       ports_stats[pt_id].rx_bad_outer_l4_csum);
			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_outer_ip_csum);
		}
		if (stats.ierrors + stats.rx_nombuf > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats.opackets, ports_stats[pt_id].tx_dropped,
		       stats.opackets + ports_stats[pt_id].tx_dropped);

		if (record_burst_stats) {
			if (ports_stats[pt_id].rx_stream)
				pkt_burst_stats_display("RX",
					&ports_stats[pt_id].rx_stream->rx_burst_stats);
			if (ports_stats[pt_id].tx_stream)
				pkt_burst_stats_display("TX",
					&ports_stats[pt_id].tx_stream->tx_burst_stats);
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	if (record_core_cycles) {
#define CYC_PER_MHZ 1E6
		if (total_recv > 0 || total_xmit > 0) {
			uint64_t total_pkts = 0;

			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
				total_pkts = total_xmit;
			else
				total_pkts = total_recv;

			printf("\n  CPU cycles/packet=%.2F (total cycles="
			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
			       " MHz Clock\n",
			       (double) fwd_cycles / total_pkts,
			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
		}
	}
}
void
fwd_stats_reset(void)
{
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		fs->rx_packets = 0;
		fs->tx_packets = 0;
		fs->fwd_dropped = 0;
		fs->rx_bad_ip_csum = 0;
		fs->rx_bad_l4_csum = 0;
		fs->rx_bad_outer_l4_csum = 0;
		fs->rx_bad_outer_ip_csum = 0;

		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
		fs->core_cycles = 0;
	}
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the below do-while
				 * loop if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer is used to exit
				 * this loop after a 1-second timeout.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIB_BITRATESTATS
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIB_BITRATESTATS
		if (bitrate_enabled != 0 &&
		    bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0 &&
		    latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	unsigned int i;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq are 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	fwd_stats_reset();
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	test_done = 0;
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	port_fwd_end_t port_fwd_end;
	lcoreid_t lc_id;
	portid_t pt_id;
	int i;

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}

	fwd_stats_display();

	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down fail.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}
/* Configure the Rx and Tx hairpin queues for the selected port. */
static int
setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];
	uint16_t peer_rx_port = pi;
	uint16_t peer_tx_port = pi;
	uint32_t manual = 1;
	uint32_t tx_exp = hairpin_mode & 0x10;

	if (!(hairpin_mode & 0xf)) {
		peer_rx_port = pi;
		peer_tx_port = pi;
		manual = 0;
	} else if (hairpin_mode & 0x1) {
		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
						RTE_ETH_DEV_NO_OWNER);
		if (peer_tx_port >= RTE_MAX_ETHPORTS)
			peer_tx_port = rte_eth_find_next_owned_by(0,
						RTE_ETH_DEV_NO_OWNER);
		if (p_pi != RTE_MAX_ETHPORTS) {
			peer_rx_port = p_pi;
		} else {
			uint16_t next_pi;

			/* Last port will be the peer RX port of the first. */
			RTE_ETH_FOREACH_DEV(next_pi)
				peer_rx_port = next_pi;
		}
		manual = 1;
	} else if (hairpin_mode & 0x2) {
		if (cnt_pi & 0x1) {
			peer_rx_port = p_pi;
		} else {
			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
						RTE_ETH_DEV_NO_OWNER);
			if (peer_rx_port >= RTE_MAX_ETHPORTS)
				peer_rx_port = pi;
		}
		peer_tx_port = peer_rx_port;
		manual = 1;
	}

	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = peer_rx_port;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Fail to setup Tx hairpin queue, return */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
			       "to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
		       "queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = peer_tx_port;
		hairpin_conf.peers[0].queue = i + nb_txq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Fail to setup Rx hairpin queue, return */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set back "
			       "to stopped\n", pi);
		printf("Fail to configure port %d hairpin "
		       "queues\n", pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
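
/*
 * Summary of the hairpin_mode bits exercised above (derived from the code
 * of setup_hairpin_queues() itself, not from formal documentation): with
 * (hairpin_mode & 0xf) == 0 each port peers with itself and binding is
 * implicit; bit 0 chains each port to the next one (the last loops back to
 * the first); bit 1 pairs ports two by two; bit 4 requests explicit Tx
 * flow mode. Bits 0 and 1 imply manual binding.
 */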
/* Configure the Rx with optional split. */
int
rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
	       uint16_t nb_rx_desc, unsigned int socket_id,
	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
{
	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
	unsigned int i, mp_n;
	int ret;

	if (rx_pkt_nb_segs <= 1 ||
	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
		rx_conf->rx_seg = NULL;
		rx_conf->rx_nseg = 0;
		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
					     nb_rx_desc, socket_id,
					     rx_conf, mp);
		return ret;
	}
	for (i = 0; i < rx_pkt_nb_segs; i++) {
		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
		struct rte_mempool *mpx;

		/*
		 * Use last valid pool for the segments with number
		 * exceeding the pool index.
		 */
		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
		mpx = mbuf_pool_find(socket_id, mp_n);
		/* Handle zero as mbuf data buffer size. */
		rx_seg->length = rx_pkt_seg_lengths[i] ?
				 rx_pkt_seg_lengths[i] :
				 mbuf_data_size[mp_n];
		rx_seg->offset = i < rx_pkt_nb_offs ?
				 rx_pkt_seg_offsets[i] : 0;
		rx_seg->mp = mpx ? mpx : mp;
	}
	rx_conf->rx_nseg = rx_pkt_nb_segs;
	rx_conf->rx_seg = rx_useg;
	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
				     socket_id, rx_conf, NULL);
	rx_conf->rx_seg = NULL;
	rx_conf->rx_nseg = 0;
	return ret;
}
2438 start_port(portid_t pid)
2440 int diag, need_check_link_status = -1;
2442 portid_t p_pi = RTE_MAX_ETHPORTS;
2443 portid_t pl[RTE_MAX_ETHPORTS];
2444 portid_t peer_pl[RTE_MAX_ETHPORTS];
2445 uint16_t cnt_pi = 0;
2446 uint16_t cfg_pi = 0;
2449 struct rte_port *port;
2450 struct rte_ether_addr mac_addr;
2451 struct rte_eth_hairpin_cap cap;
2453 if (port_id_is_invalid(pid, ENABLED_WARN))
2456 RTE_ETH_FOREACH_DEV(pi) {
2457 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2460 need_check_link_status = 0;
2462 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2463 RTE_PORT_HANDLING) == 0) {
2464 printf("Port %d is now not stopped\n", pi);
2468 if (port->need_reconfig > 0) {
2469 port->need_reconfig = 0;
2471 if (flow_isolate_all) {
2472 int ret = port_flow_isolate(pi, 1);
2473 if (ret) {
2474 printf("Failed to apply isolated "
2475 "mode on port %d\n", pi);
2476 return -1;
2477 }
2478 }
2479 configure_rxtx_dump_callbacks(0);
2480 printf("Configuring Port %d (socket %u)\n", pi,
2482 if (nb_hairpinq > 0 &&
2483 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2484 printf("Port %d doesn't support hairpin "
2488 /* configure port */
2489 diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
2490 nb_txq + nb_hairpinq,
2493 if (rte_atomic16_cmpset(&(port->port_status),
2494 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2495 printf("Port %d can not be set back "
2496 "to stopped\n", pi);
2497 printf("Fail to configure port %d\n", pi);
2498 /* try to reconfigure port next time */
2499 port->need_reconfig = 1;
2503 if (port->need_reconfig_queues > 0) {
2504 port->need_reconfig_queues = 0;
2505 /* setup tx queues */
2506 for (qi = 0; qi < nb_txq; qi++) {
2507 if ((numa_support) &&
2508 (txring_numa[pi] != NUMA_NO_CONFIG))
2509 diag = rte_eth_tx_queue_setup(pi, qi,
2510 port->nb_tx_desc[qi],
2511 txring_numa[pi],
2512 &(port->tx_conf[qi]));
2513 else
2514 diag = rte_eth_tx_queue_setup(pi, qi,
2515 port->nb_tx_desc[qi],
2516 port->socket_id,
2517 &(port->tx_conf[qi]));
2519 if (diag == 0)
2520 continue;
2522 /* Failed to set up a Tx queue: revert the port status and return. */
2523 if (rte_atomic16_cmpset(&(port->port_status),
2524 RTE_PORT_HANDLING,
2525 RTE_PORT_STOPPED) == 0)
2526 printf("Port %d cannot be set back "
2527 "to stopped\n", pi);
2528 printf("Failed to configure port %d Tx queues\n",
2529 pi);
2530 /* try to reconfigure queues next time */
2531 port->need_reconfig_queues = 1;
2532 return -1;
2533 }
2534 for (qi = 0; qi < nb_rxq; qi++) {
2535 /* setup rx queues */
2536 if ((numa_support) &&
2537 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2538 struct rte_mempool *mp =
2539 mbuf_pool_find
2540 (rxring_numa[pi], 0);
2541 if (mp == NULL) {
2542 printf("Failed to set up Rx queue: "
2543 "no mempool allocated "
2544 "on socket %d\n",
2545 rxring_numa[pi]);
2546 return -1;
2547 }
2549 diag = rx_queue_setup(pi, qi,
2550 port->nb_rx_desc[qi],
2551 rxring_numa[pi],
2552 &(port->rx_conf[qi]),
2553 mp);
2554 } else {
2555 struct rte_mempool *mp =
2556 mbuf_pool_find
2557 (port->socket_id, 0);
2558 if (mp == NULL) {
2559 printf("Failed to set up Rx queue: "
2560 "no mempool allocated "
2561 "on socket %d\n",
2562 port->socket_id);
2563 return -1;
2564 }
2565 diag = rx_queue_setup(pi, qi,
2566 port->nb_rx_desc[qi],
2567 port->socket_id,
2568 &(port->rx_conf[qi]),
2569 mp);
2570 }
2571 if (diag == 0)
2572 continue;
2574 /* Failed to set up an Rx queue: revert the port status and return. */
2575 if (rte_atomic16_cmpset(&(port->port_status),
2576 RTE_PORT_HANDLING,
2577 RTE_PORT_STOPPED) == 0)
2578 printf("Port %d cannot be set back "
2579 "to stopped\n", pi);
2580 printf("Failed to configure port %d Rx queues\n",
2581 pi);
2582 /* try to reconfigure queues next time */
2583 port->need_reconfig_queues = 1;
2584 return -1;
2585 }
2586 /* setup hairpin queues */
2587 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
2588 return -1;
2589 }
2590 configure_rxtx_dump_callbacks(verbose_level);
2591 if (clear_ptypes) {
2592 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2593 NULL, 0);
2594 if (diag < 0)
2595 printf(
2596 "Port %d: Failed to disable Ptype parsing\n",
2597 pi);
2598 }
2600 p_pi = pi;
2601 cnt_pi++;
2603 /* start port */
2604 diag = rte_eth_dev_start(pi);
2605 if (diag < 0) {
2606 printf("Failed to start port %d: %s\n", pi,
2607 rte_strerror(-diag));
2609 /* Failed to start the port: revert the port status. */
2610 if (rte_atomic16_cmpset(&(port->port_status),
2611 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2612 printf("Port %d cannot be set back to "
2613 "stopped\n", pi);
2614 continue;
2615 }
2617 if (rte_atomic16_cmpset(&(port->port_status),
2618 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2619 printf("Port %d cannot be set to started\n", pi);
2621 if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2622 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2623 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2624 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2625 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2627 /* at least one port started, need to check link status */
2628 need_check_link_status = 1;
2630 pl[cfg_pi++] = pi;
2631 }
2633 if (need_check_link_status == 1 && !no_link_check)
2634 check_all_ports_link_status(RTE_PORT_ALL);
2635 else if (need_check_link_status == 0)
2636 printf("Please stop the ports first\n");
2638 if (hairpin_mode & 0xf) {
2639 uint16_t i;
2640 int j;
2642 /* bind all started hairpin ports */
2643 for (i = 0; i < cfg_pi; i++) {
2644 pi = pl[i];
2645 /* bind current Tx to all peer Rx */
2646 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2647 RTE_MAX_ETHPORTS, 1);
2648 if (peer_pi < 0)
2649 return peer_pi;
2650 for (j = 0; j < peer_pi; j++) {
2651 if (!port_is_started(peer_pl[j]))
2652 continue;
2653 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2654 if (diag < 0) {
2655 printf("Error during binding hairpin"
2656 " Tx port %u to %u: %s\n",
2657 pi, peer_pl[j],
2658 rte_strerror(-diag));
2659 return -1;
2660 }
2661 }
2662 /* bind all peer Tx to current Rx */
2663 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2664 RTE_MAX_ETHPORTS, 0);
2665 if (peer_pi < 0)
2666 return peer_pi;
2667 for (j = 0; j < peer_pi; j++) {
2668 if (!port_is_started(peer_pl[j]))
2669 continue;
2670 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
2671 if (diag < 0) {
2672 printf("Error during binding hairpin"
2673 " Tx port %u to %u: %s\n",
2674 peer_pl[j], pi,
2675 rte_strerror(-diag));
2676 return -1;
2677 }
2678 }
2679 }
2680 }
2682 printf("Done\n");
2683 return 0;
2684 }
2687 stop_port(portid_t pid)
2690 struct rte_port *port;
2691 int need_check_link_status = 0;
2692 portid_t peer_pl[RTE_MAX_ETHPORTS];
2695 if (port_id_is_invalid(pid, ENABLED_WARN))
2698 printf("Stopping ports...\n");
2700 RTE_ETH_FOREACH_DEV(pi) {
2701 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2702 continue;
2704 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2705 printf("Please remove port %d from forwarding configuration.\n", pi);
2706 continue;
2707 }
2709 if (port_is_bonding_slave(pi)) {
2710 printf("Please remove port %d from bonded device.\n", pi);
2711 continue;
2712 }
2714 port = &ports[pi];
2715 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2716 RTE_PORT_HANDLING) == 0)
2717 continue;
2719 if (hairpin_mode & 0xf) {
2720 int j;
2722 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
2723 /* unbind all peer Tx from current Rx */
2724 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2725 RTE_MAX_ETHPORTS, 0);
2726 if (peer_pi < 0)
2727 continue;
2728 for (j = 0; j < peer_pi; j++) {
2729 if (!port_is_started(peer_pl[j]))
2730 continue;
2731 rte_eth_hairpin_unbind(peer_pl[j], pi);
2732 }
2733 }
2735 if (port->flow_list)
2736 port_flow_flush(pi);
2738 if (rte_eth_dev_stop(pi) != 0)
2739 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
2740 pi);
2742 if (rte_atomic16_cmpset(&(port->port_status),
2743 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2744 printf("Port %d cannot be set to stopped\n", pi);
2745 need_check_link_status = 1;
2746 }
2747 if (need_check_link_status && !no_link_check)
2748 check_all_ports_link_status(RTE_PORT_ALL);
2750 printf("Done\n");
2751 }
2753 static void
2754 remove_invalid_ports_in(portid_t *array, portid_t *total)
2755 {
2756 portid_t i;
2757 portid_t new_total = 0;
2759 for (i = 0; i < *total; i++)
2760 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2761 array[new_total] = array[i];
2762 new_total++;
2763 }
2764 *total = new_total;
2765 }
2767 void
2768 remove_invalid_ports(void)
2769 {
2770 remove_invalid_ports_in(ports_ids, &nb_ports);
2771 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2772 nb_cfg_ports = nb_fwd_ports;
2773 }
2776 close_port(portid_t pid)
2779 struct rte_port *port;
2781 if (port_id_is_invalid(pid, ENABLED_WARN))
2784 printf("Closing ports...\n");
2786 RTE_ETH_FOREACH_DEV(pi) {
2787 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2790 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2791 printf("Please remove port %d from forwarding configuration.\n", pi);
2795 if (port_is_bonding_slave(pi)) {
2796 printf("Please remove port %d from bonded device.\n", pi);
2801 if (rte_atomic16_cmpset(&(port->port_status),
2802 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2803 printf("Port %d is already closed\n", pi);
2807 port_flow_flush(pi);
2808 rte_eth_dev_close(pi);
2811 remove_invalid_ports();
2816 reset_port(portid_t pid)
2820 struct rte_port *port;
2822 if (port_id_is_invalid(pid, ENABLED_WARN))
2825 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2826 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2827 printf("Can not reset port(s), please stop port(s) first.\n");
2831 printf("Resetting ports...\n");
2833 RTE_ETH_FOREACH_DEV(pi) {
2834 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2837 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2838 printf("Please remove port %d from forwarding "
2839 "configuration.\n", pi);
2843 if (port_is_bonding_slave(pi)) {
2844 printf("Please remove port %d from bonded device.\n",
2849 diag = rte_eth_dev_reset(pi);
2852 port->need_reconfig = 1;
2853 port->need_reconfig_queues = 1;
2855 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2862 void
2863 attach_port(char *identifier)
2864 {
2865 portid_t pi;
2866 struct rte_dev_iterator iterator;
2868 printf("Attaching a new port...\n");
2870 if (identifier == NULL) {
2871 printf("Invalid parameters are specified\n");
2872 return;
2873 }
2875 if (rte_dev_probe(identifier) < 0) {
2876 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2877 return;
2878 }
2880 /* first attach mode: event */
2881 if (setup_on_probe_event) {
2882 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2883 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2884 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2885 ports[pi].need_setup != 0)
2886 setup_attached_port(pi);
2887 return;
2888 }
2890 /* second attach mode: iterator */
2891 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2892 /* setup ports matching the devargs used for probing */
2893 if (port_is_forwarding(pi))
2894 continue; /* port was already attached before */
2895 setup_attached_port(pi);
2896 }
2897 }
2899 static void
2900 setup_attached_port(portid_t pi)
2901 {
2902 unsigned int socket_id;
2903 int ret;
2905 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2906 /* if socket_id is invalid, set to the first available socket. */
2907 if (check_socket_id(socket_id) < 0)
2908 socket_id = socket_ids[0];
2909 reconfig(pi, socket_id);
2910 ret = rte_eth_promiscuous_enable(pi);
2911 if (ret != 0)
2912 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
2913 pi, rte_strerror(-ret));
2915 ports_ids[nb_ports++] = pi;
2916 fwd_ports_ids[nb_fwd_ports++] = pi;
2917 nb_cfg_ports = nb_fwd_ports;
2918 ports[pi].need_setup = 0;
2919 ports[pi].port_status = RTE_PORT_STOPPED;
2921 printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
2922 printf("Done\n");
2923 }
2925 static void
2926 detach_device(struct rte_device *dev)
2927 {
2928 portid_t sibling;
2930 if (dev == NULL) {
2931 printf("Device already removed\n");
2932 return;
2933 }
2935 printf("Removing a device...\n");
2937 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2938 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2939 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
2940 printf("Port %u not stopped\n", sibling);
2941 return;
2942 }
2943 port_flow_flush(sibling);
2944 }
2945 }
2947 if (rte_dev_remove(dev) < 0) {
2948 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2949 return;
2950 }
2951 remove_invalid_ports();
2953 printf("Device is detached\n");
2954 printf("Total number of ports is now %d\n", nb_ports);
2955 printf("Done\n");
2956 }
2959 void
2960 detach_port_device(portid_t port_id)
2961 {
2962 if (port_id_is_invalid(port_id, ENABLED_WARN))
2963 return;
2965 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2966 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2967 printf("Port not stopped\n");
2968 return;
2969 }
2970 printf("Port was not closed\n");
2971 }
2973 detach_device(rte_eth_devices[port_id].device);
2974 }
2976 void
2977 detach_devargs(char *identifier)
2978 {
2979 struct rte_dev_iterator iterator;
2980 struct rte_devargs da;
2981 portid_t port_id;
2983 printf("Removing a device...\n");
2985 memset(&da, 0, sizeof(da));
2986 if (rte_devargs_parsef(&da, "%s", identifier)) {
2987 printf("Cannot parse identifier\n");
2988 return;
2989 }
2991 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
2992 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2993 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2994 printf("Port %u not stopped\n", port_id);
2995 rte_eth_iterator_cleanup(&iterator);
2996 rte_devargs_reset(&da);
2997 return;
2998 }
2999 port_flow_flush(port_id);
3000 }
3001 }
3003 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3004 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3005 da.name, da.bus->name);
3006 rte_devargs_reset(&da);
3007 return;
3008 }
3010 remove_invalid_ports();
3012 printf("Device %s is detached\n", identifier);
3013 printf("Total number of ports is now %d\n", nb_ports);
3014 printf("Done\n");
3015 rte_devargs_reset(&da);
3016 }
3018 void
3019 pmd_test_exit(void)
3020 {
3021 portid_t pt_id;
3022 unsigned int i;
3023 int ret;
3025 if (test_done == 0)
3026 stop_packet_forwarding();
3028 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3029 if (mempools[i]) {
3030 if (mp_alloc_type == MP_ALLOC_ANON)
3031 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3032 NULL);
3033 }
3034 }
3035 if (ports != NULL) {
3036 no_link_check = 1;
3037 RTE_ETH_FOREACH_DEV(pt_id) {
3038 printf("\nStopping port %d...\n", pt_id);
3039 fflush(stdout);
3040 stop_port(pt_id);
3041 }
3042 RTE_ETH_FOREACH_DEV(pt_id) {
3043 printf("\nShutting down port %d...\n", pt_id);
3044 fflush(stdout);
3045 close_port(pt_id);
3046 }
3047 }
3049 if (hot_plug) {
3050 ret = rte_dev_event_monitor_stop();
3051 if (ret) {
3052 RTE_LOG(ERR, EAL,
3053 "fail to stop device event monitor.");
3054 return;
3055 }
3057 ret = rte_dev_event_callback_unregister(NULL,
3058 dev_event_callback, NULL);
3059 if (ret < 0) {
3060 RTE_LOG(ERR, EAL,
3061 "fail to unregister device event callback.\n");
3062 return;
3063 }
3065 ret = rte_dev_hotplug_handle_disable();
3066 if (ret) {
3067 RTE_LOG(ERR, EAL,
3068 "fail to disable hotplug handling.\n");
3069 return;
3070 }
3071 }
3072 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3073 if (mempools[i])
3074 rte_mempool_free(mempools[i]);
3075 }
3077 printf("\nBye...\n");
3078 }
3080 typedef void (*cmd_func_t)(void);
3081 struct pmd_test_command {
3082 const char *cmd_name;
3083 cmd_func_t cmd_func;
3084 };
3086 /* Check the link status of all ports for up to 9 s, and print the final statuses */
3087 static void
3088 check_all_ports_link_status(uint32_t port_mask)
3089 {
3090 #define CHECK_INTERVAL 100 /* 100 ms */
3091 #define MAX_CHECK_TIME 90 /* 9 s (90 * 100 ms) in total */
3092 portid_t portid;
3093 uint8_t count, all_ports_up, print_flag = 0;
3094 struct rte_eth_link link;
3095 int ret;
3096 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
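/*
 * Polling scheme: up to MAX_CHECK_TIME rounds of CHECK_INTERVAL ms
 * (90 * 100 ms = 9 s). Once every polled link is up, or on the last
 * round, print_flag is set so that one final pass prints the status
 * of each port before the loop exits.
 */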
3098 printf("Checking link statuses...\n");
3100 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3102 RTE_ETH_FOREACH_DEV(portid) {
3103 if ((port_mask & (1 << portid)) == 0)
3104 continue;
3105 memset(&link, 0, sizeof(link));
3106 ret = rte_eth_link_get_nowait(portid, &link);
3107 if (ret < 0) {
3108 all_ports_up = 0;
3109 if (print_flag == 1)
3110 printf("Port %u link get failed: %s\n",
3111 portid, rte_strerror(-ret));
3112 continue;
3113 }
3114 /* print the link status if the print flag is set */
3115 if (print_flag == 1) {
3116 rte_eth_link_to_str(link_status,
3117 sizeof(link_status), &link);
3118 printf("Port %d %s\n", portid, link_status);
3119 continue;
3120 }
3121 /* clear the all_ports_up flag if any link is down */
3122 if (link.link_status == ETH_LINK_DOWN) {
3123 all_ports_up = 0;
3124 break;
3125 }
3126 }
3127 /* after finally printing all link statuses, get out */
3128 if (print_flag == 1)
3129 break;
3131 if (all_ports_up == 0) {
3132 fflush(stdout);
3133 rte_delay_ms(CHECK_INTERVAL);
3134 }
3136 /* set the print_flag if all ports are up or on timeout */
3137 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3138 print_flag = 1;
3139 }
3140 }
3141 }
3146 static void
3147 rmv_port_callback(void *arg)
3148 {
3149 int need_to_start = 0;
3150 int org_no_link_check = no_link_check;
3151 portid_t port_id = (intptr_t)arg;
3152 struct rte_device *dev;
3154 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3156 if (!test_done && port_is_forwarding(port_id)) {
3157 need_to_start = 1;
3158 stop_packet_forwarding();
3159 }
3160 no_link_check = 1;
3161 stop_port(port_id);
3162 no_link_check = org_no_link_check;
3164 /* Save the rte_device pointer before closing the ethdev port */
3165 dev = rte_eth_devices[port_id].device;
3166 close_port(port_id);
3167 detach_device(dev); /* might already be removed or have more ports */
3169 if (need_to_start)
3170 start_packet_forwarding(0);
3171 }
3173 /* This function is used by the interrupt thread */
3174 static int
3175 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3176 void *ret_param)
3177 {
3178 RTE_SET_USED(param);
3179 RTE_SET_USED(ret_param);
3181 if (type >= RTE_ETH_EVENT_MAX) {
3182 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3183 port_id, __func__, type);
3184 fflush(stderr);
3185 } else if (event_print_mask & (UINT32_C(1) << type)) {
3186 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3187 eth_event_desc[type]);
3188 fflush(stdout);
3189 }
3191 switch (type) {
3192 case RTE_ETH_EVENT_NEW:
3193 ports[port_id].need_setup = 1;
3194 ports[port_id].port_status = RTE_PORT_HANDLING;
3195 break;
3196 case RTE_ETH_EVENT_INTR_RMV:
3197 if (port_id_is_invalid(port_id, DISABLED_WARN))
3198 break;
3199 if (rte_eal_alarm_set(100000,
3200 rmv_port_callback, (void *)(intptr_t)port_id))
3201 fprintf(stderr, "Could not set up deferred device removal\n");
3202 break;
3203 case RTE_ETH_EVENT_DESTROY:
3204 ports[port_id].port_status = RTE_PORT_CLOSED;
3205 printf("Port %u is closed\n", port_id);
3206 break;
3207 default:
3208 break;
3209 }
3210 return 0;
3211 }
3213 static int
3214 register_eth_event_callback(void)
3215 {
3216 int ret;
3217 enum rte_eth_event_type event;
3219 for (event = RTE_ETH_EVENT_UNKNOWN;
3220 event < RTE_ETH_EVENT_MAX; event++) {
3221 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3222 event,
3223 eth_event_callback,
3224 NULL);
3225 if (ret != 0) {
3226 TESTPMD_LOG(ERR, "Failed to register callback for "
3227 "%s event\n", eth_event_desc[event]);
3228 return -1;
3229 }
3230 }
3232 return 0;
3233 }
3235 /* This function is used by the interrupt thread */
3236 static void
3237 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3238 __rte_unused void *arg)
3239 {
3240 uint16_t port_id;
3241 int ret;
3243 if (type >= RTE_DEV_EVENT_MAX) {
3244 fprintf(stderr, "%s called upon invalid event %d\n",
3245 __func__, type);
3246 fflush(stderr);
3247 }
3249 switch (type) {
3250 case RTE_DEV_EVENT_REMOVE:
3251 RTE_LOG(DEBUG, EAL, "The device %s has been removed!\n",
3252 device_name);
3253 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3254 if (ret) {
3255 RTE_LOG(ERR, EAL, "Cannot get port by device %s!\n",
3256 device_name);
3257 return;
3258 }
3259 /*
3260 * Because the user's callback is invoked from the EAL
3261 * interrupt callback, the interrupt callback must return
3262 * before the device can be detached and the callback
3263 * unregistered. Therefore, return from this callback quickly
3264 * and schedule the detach as a deferred removal via an alarm.
3265 * This is a workaround; once device detaching is moved into
3266 * the EAL, the deferred removal can be dropped.
3267 */
3268 if (rte_eal_alarm_set(100000,
3269 rmv_port_callback, (void *)(intptr_t)port_id))
3270 RTE_LOG(ERR, EAL,
3271 "Could not set up deferred device removal\n");
3272 break;
3273 case RTE_DEV_EVENT_ADD:
3274 RTE_LOG(ERR, EAL, "The device %s has been added!\n",
3275 device_name);
3276 /* TODO: once kernel driver binding is finished,
3277 * begin to attach the port.
3278 */
3279 break;
3280 default:
3281 break;
3282 }
3283 }
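/*
 * rxtx_port_config() below starts every queue from the driver's
 * default_rxconf/default_txconf and then overrides only the values
 * given on the command line (RTE_PMD_PARAM_UNSET marks an untouched
 * parameter); per-queue offloads requested earlier are preserved
 * across the reset to the defaults.
 */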
3285 static void
3286 rxtx_port_config(struct rte_port *port)
3287 {
3288 uint16_t qid;
3289 uint64_t offloads;
3291 for (qid = 0; qid < nb_rxq; qid++) {
3292 offloads = port->rx_conf[qid].offloads;
3293 port->rx_conf[qid] = port->dev_info.default_rxconf;
3294 if (offloads != 0)
3295 port->rx_conf[qid].offloads = offloads;
3297 /* Check if any Rx parameters have been passed */
3298 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3299 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3301 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3302 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3304 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3305 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3307 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3308 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3310 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3311 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3313 port->nb_rx_desc[qid] = nb_rxd;
3314 }
3316 for (qid = 0; qid < nb_txq; qid++) {
3317 offloads = port->tx_conf[qid].offloads;
3318 port->tx_conf[qid] = port->dev_info.default_txconf;
3319 if (offloads != 0)
3320 port->tx_conf[qid].offloads = offloads;
3322 /* Check if any Tx parameters have been passed */
3323 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3324 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3326 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3327 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3329 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3330 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3332 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3333 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3335 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3336 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3338 port->nb_tx_desc[qid] = nb_txd;
3339 }
3340 }
3342 /*
3343 * Helper function to reconcile the max_rx_pkt_len value with the
3344 * JUMBO_FRAME offload; the MTU is also aligned if the JUMBO_FRAME
3345 * offload is not set.
3346 *
3347 * port->dev_info must be set before calling this function.
3348 *
3349 * return 0 on success, negative on error
3350 */
3351 int
3352 update_jumbo_frame_offload(portid_t portid)
3353 {
3354 struct rte_port *port = &ports[portid];
3355 uint32_t eth_overhead;
3356 uint64_t rx_offloads;
3357 int ret;
3358 bool on;
3359 /* Derive the Ethernet overhead so that max_rx_pkt_len corresponds to an MTU of RTE_ETHER_MTU */
3360 if (port->dev_info.max_mtu != UINT16_MAX &&
3361 port->dev_info.max_rx_pktlen > port->dev_info.max_mtu)
3362 eth_overhead = port->dev_info.max_rx_pktlen -
3363 port->dev_info.max_mtu;
3365 eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
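/*
 * Worked example: with the generic fallback the overhead is
 * RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 18 bytes, so an
 * MTU of RTE_ETHER_MTU (1500) corresponds to a max_rx_pkt_len of
 * 1518. Devices that report max_mtu use their own overhead instead.
 */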
3367 rx_offloads = port->dev_conf.rxmode.offloads;
3369 /* Default config value is 0 to use PMD specific overhead */
3370 if (port->dev_conf.rxmode.max_rx_pkt_len == 0)
3371 port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead;
3373 if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) {
3374 rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3375 on = false;
3376 } else {
3377 if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
3378 printf("Frame size (%u) is not supported by port %u\n",
3379 port->dev_conf.rxmode.max_rx_pkt_len,
3380 portid);
3381 return -1;
3382 }
3383 rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
3384 on = true;
3385 }
3387 if (rx_offloads != port->dev_conf.rxmode.offloads) {
3388 uint16_t qid;
3390 port->dev_conf.rxmode.offloads = rx_offloads;
3392 /* Apply the JUMBO_FRAME offload configuration to the Rx queue(s) */
3393 for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
3394 if (on)
3395 port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
3396 else
3397 port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3398 }
3399 }
3401 /* If JUMBO_FRAME is set, the MTU conversion is done by the ethdev layer;
3402 * if it is unset, do the conversion here.
3403 */
3404 if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
3405 ret = rte_eth_dev_set_mtu(portid,
3406 port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead);
3407 if (ret)
3408 printf("Failed to set MTU to %u for port %u\n",
3409 port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead,
3410 portid);
3411 }
3413 return 0;
3414 }
3416 void
3417 init_port_config(void)
3418 {
3419 portid_t pid;
3420 struct rte_port *port;
3421 int ret;
3423 RTE_ETH_FOREACH_DEV(pid) {
3424 port = &ports[pid];
3425 port->dev_conf.fdir_conf = fdir_conf;
3427 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3428 if (ret != 0)
3429 return;
3431 if (nb_rxq > 1) {
3432 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3433 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3434 rss_hf & port->dev_info.flow_type_rss_offloads;
3435 } else {
3436 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3437 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3438 }
3440 if (port->dcb_flag == 0) {
3441 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3442 port->dev_conf.rxmode.mq_mode =
3443 (enum rte_eth_rx_mq_mode)
3444 (rx_mq_mode & ETH_MQ_RX_RSS);
3445 else
3446 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3447 }
3449 rxtx_port_config(port);
3451 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3452 if (ret != 0)
3453 return;
3455 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3456 rte_pmd_ixgbe_bypass_init(pid);
3457 #endif
3459 if (lsc_interrupt &&
3460 (rte_eth_devices[pid].data->dev_flags &
3461 RTE_ETH_DEV_INTR_LSC))
3462 port->dev_conf.intr_conf.lsc = 1;
3463 if (rmv_interrupt &&
3464 (rte_eth_devices[pid].data->dev_flags &
3465 RTE_ETH_DEV_INTR_RMV))
3466 port->dev_conf.intr_conf.rmv = 1;
3467 }
3468 }
3470 void set_port_slave_flag(portid_t slave_pid)
3471 {
3472 struct rte_port *port;
3474 port = &ports[slave_pid];
3475 port->slave_flag = 1;
3476 }
3478 void clear_port_slave_flag(portid_t slave_pid)
3479 {
3480 struct rte_port *port;
3482 port = &ports[slave_pid];
3483 port->slave_flag = 0;
3484 }
3486 uint8_t port_is_bonding_slave(portid_t slave_pid)
3487 {
3488 struct rte_port *port;
3490 port = &ports[slave_pid];
3491 if ((rte_eth_devices[slave_pid].data->dev_flags &
3492 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3493 return 1;
3494 return 0;
3495 }
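/*
 * The 32 VLAN tags below feed the VMDQ+DCB pool mapping built in
 * get_eth_dcb_conf(): each tag is assigned round-robin to one of the
 * nb_queue_pools pools, e.g. with 16 pools, tags 0 and 16 map to
 * pool 0, tags 1 and 17 to pool 1, and so on.
 */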
3497 const uint16_t vlan_tags[] = {
3498 0, 1, 2, 3, 4, 5, 6, 7,
3499 8, 9, 10, 11, 12, 13, 14, 15,
3500 16, 17, 18, 19, 20, 21, 22, 23,
3501 24, 25, 26, 27, 28, 29, 30, 31
3502 };
3504 static int
3505 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3506 enum dcb_mode_enable dcb_mode,
3507 enum rte_eth_nb_tcs num_tcs,
3508 uint8_t pfc_en)
3509 {
3510 uint8_t i;
3511 int32_t rc;
3512 struct rte_eth_rss_conf rss_conf;
3514 /*
3515 * Build up the correct configuration for dcb+vt based on the vlan tags array
3516 * given above, and the number of traffic classes available for use.
3517 */
3518 if (dcb_mode == DCB_VT_ENABLED) {
3519 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3520 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3521 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3522 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3524 /* VMDQ+DCB RX and TX configurations */
3525 vmdq_rx_conf->enable_default_pool = 0;
3526 vmdq_rx_conf->default_pool = 0;
3527 vmdq_rx_conf->nb_queue_pools =
3528 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3529 vmdq_tx_conf->nb_queue_pools =
3530 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3532 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3533 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3534 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3535 vmdq_rx_conf->pool_map[i].pools =
3536 1 << (i % vmdq_rx_conf->nb_queue_pools);
3537 }
3538 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3539 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3540 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3541 }
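/*
 * Example of the mapping above: with num_tcs == ETH_4_TCS the eight
 * user priorities 0..7 are assigned to traffic classes 0,1,2,3,0,1,2,3
 * for both the Rx and the Tx direction.
 */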
3543 /* set DCB mode of RX and TX of multiple queues */
3544 eth_conf->rxmode.mq_mode =
3545 (enum rte_eth_rx_mq_mode)
3546 (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3547 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3548 } else {
3549 struct rte_eth_dcb_rx_conf *rx_conf =
3550 &eth_conf->rx_adv_conf.dcb_rx_conf;
3551 struct rte_eth_dcb_tx_conf *tx_conf =
3552 &eth_conf->tx_adv_conf.dcb_tx_conf;
3554 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3556 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3557 if (rc != 0)
3558 return rc;
3560 rx_conf->nb_tcs = num_tcs;
3561 tx_conf->nb_tcs = num_tcs;
3563 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3564 rx_conf->dcb_tc[i] = i % num_tcs;
3565 tx_conf->dcb_tc[i] = i % num_tcs;
3566 }
3568 eth_conf->rxmode.mq_mode =
3569 (enum rte_eth_rx_mq_mode)
3570 (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3571 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3572 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3573 }
3575 if (pfc_en)
3576 eth_conf->dcb_capability_en =
3577 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3578 else
3579 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3581 return 0;
3582 }
3584 int
3585 init_port_dcb_config(portid_t pid,
3586 enum dcb_mode_enable dcb_mode,
3587 enum rte_eth_nb_tcs num_tcs,
3588 uint8_t pfc_en)
3589 {
3590 struct rte_eth_conf port_conf;
3591 struct rte_port *rte_port;
3592 int retval;
3593 uint16_t i;
3595 rte_port = &ports[pid];
3597 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3599 port_conf.rxmode = rte_port->dev_conf.rxmode;
3600 port_conf.txmode = rte_port->dev_conf.txmode;
3602 /* set configuration of DCB in vt mode and DCB in non-vt mode */
3603 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3604 if (retval < 0)
3605 return retval;
3606 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3608 /* re-configure the device */
3609 retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
3610 if (retval < 0)
3611 return retval;
3613 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3614 if (retval != 0)
3615 return retval;
3617 /* If dev_info.vmdq_pool_base is greater than 0,
3618 * the queue ids of the vmdq pools start after the pf queues.
3619 */
3620 if (dcb_mode == DCB_VT_ENABLED &&
3621 rte_port->dev_info.vmdq_pool_base > 0) {
3622 printf("VMDQ_DCB multi-queue mode is nonsensical "
3623 "for port %d.\n", pid);
3624 return -1;
3625 }
3627 /* Assume the ports in testpmd have the same dcb capability
3628 * and have the same number of rxq and txq in dcb mode
3629 */
3630 if (dcb_mode == DCB_VT_ENABLED) {
3631 if (rte_port->dev_info.max_vfs > 0) {
3632 nb_rxq = rte_port->dev_info.nb_rx_queues;
3633 nb_txq = rte_port->dev_info.nb_tx_queues;
3634 } else {
3635 nb_rxq = rte_port->dev_info.max_rx_queues;
3636 nb_txq = rte_port->dev_info.max_tx_queues;
3637 }
3638 } else {
3639 /* if vt is disabled, use all pf queues */
3640 if (rte_port->dev_info.vmdq_pool_base == 0) {
3641 nb_rxq = rte_port->dev_info.max_rx_queues;
3642 nb_txq = rte_port->dev_info.max_tx_queues;
3643 } else {
3644 nb_rxq = (queueid_t)num_tcs;
3645 nb_txq = (queueid_t)num_tcs;
3646 }
3647 }
3649 rx_free_thresh = 64;
3651 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3653 rxtx_port_config(rte_port);
3655 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3656 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3657 rx_vft_set(pid, vlan_tags[i], 1);
3659 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3660 if (retval != 0)
3661 return retval;
3663 rte_port->dcb_flag = 1;
3665 /* Enter DCB configuration status */
3666 dcb_config = 1;
3668 return 0;
3669 }
3671 static void
3672 init_port(void)
3673 {
3674 int i;
3676 /* Configuration of Ethernet ports. */
3677 ports = rte_zmalloc("testpmd: ports",
3678 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3679 RTE_CACHE_LINE_SIZE);
3680 if (ports == NULL) {
3681 rte_exit(EXIT_FAILURE,
3682 "rte_zmalloc(%d struct rte_port) failed\n",
3685 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3686 LIST_INIT(&ports[i].flow_tunnel_list);
3687 /* Initialize ports NUMA structures */
3688 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3689 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3690 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3691 }
3700 static void
3701 print_stats(void)
3702 {
3703 uint8_t i;
3704 const char clr[] = { 27, '[', '2', 'J', '\0' };
3705 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
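/*
 * ANSI escape sequences: ESC "[2J" clears the screen and ESC "[1;1H"
 * moves the cursor to row 1, column 1 (the top-left corner).
 */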
3707 /* Clear screen and move to top left */
3708 printf("%s%s", clr, top_left);
3710 printf("\nPort statistics ====================================");
3711 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3712 nic_stats_display(fwd_ports_ids[i]);
3714 fflush(stdout);
3715 }
3717 static void
3718 signal_handler(int signum)
3719 {
3720 if (signum == SIGINT || signum == SIGTERM) {
3721 printf("\nSignal %d received, preparing to exit...\n",
3722 signum);
3723 #ifdef RTE_LIB_PDUMP
3724 /* uninitialize packet capture framework */
3725 rte_pdump_uninit();
3726 #endif
3727 #ifdef RTE_LIB_LATENCYSTATS
3728 if (latencystats_enabled != 0)
3729 rte_latencystats_uninit();
3730 #endif
3731 /* Set flag to indicate that a forced termination is in progress. */
3732 f_quit = 1;
3733 /* exit with the expected status */
3734 signal(signum, SIG_DFL);
3735 kill(getpid(), signum);
3736 }
3737 }
3740 int
3741 main(int argc, char** argv)
3742 {
3743 int diag;
3744 portid_t port_id;
3745 uint16_t count;
3746 int ret;
3748 signal(SIGINT, signal_handler);
3749 signal(SIGTERM, signal_handler);
3751 testpmd_logtype = rte_log_register("testpmd");
3752 if (testpmd_logtype < 0)
3753 rte_exit(EXIT_FAILURE, "Cannot register log type");
3754 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3756 diag = rte_eal_init(argc, argv);
3757 if (diag < 0)
3758 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3759 rte_strerror(rte_errno));
3761 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3762 rte_exit(EXIT_FAILURE,
3763 "Secondary process type not supported.\n");
3765 ret = register_eth_event_callback();
3766 if (ret != 0)
3767 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3769 #ifdef RTE_LIB_PDUMP
3770 /* initialize packet capture framework */
3771 rte_pdump_init();
3772 #endif
3774 count = 0;
3775 RTE_ETH_FOREACH_DEV(port_id) {
3776 ports_ids[count] = port_id;
3777 count++;
3778 }
3779 nb_ports = (portid_t) count;
3780 if (nb_ports == 0)
3781 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3783 /* allocate port structures, and init them */
3784 init_port();
3786 set_def_fwd_config();
3787 if (nb_lcores == 0)
3788 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3789 "Check the core mask argument\n");
3791 /* Bitrate/latency stats disabled by default */
3792 #ifdef RTE_LIB_BITRATESTATS
3793 bitrate_enabled = 0;
3794 #endif
3795 #ifdef RTE_LIB_LATENCYSTATS
3796 latencystats_enabled = 0;
3797 #endif
3799 /* on FreeBSD, mlockall() is disabled by default */
3800 #ifdef RTE_EXEC_ENV_FREEBSD
3801 do_mlockall = 0;
3802 #else
3803 do_mlockall = 1;
3804 #endif
3806 argc -= diag;
3807 argv += diag;
3808 if (argc > 1)
3809 launch_args_parse(argc, argv);
3811 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3812 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3813 strerror(errno));
3814 }
3816 if (tx_first && interactive)
3817 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3818 "interactive mode.\n");
3820 if (tx_first && lsc_interrupt) {
3821 printf("Warning: lsc_interrupt needs to be off when "
3822 "using tx_first. Disabling.\n");
3823 lsc_interrupt = 0;
3824 }
3826 if (!nb_rxq && !nb_txq)
3827 printf("Warning: either Rx or Tx queues should be non-zero\n");
3829 if (nb_rxq > 1 && nb_rxq > nb_txq)
3830 printf("Warning: nb_rxq=%d enables RSS configuration, "
3831 "but nb_txq=%d will prevent it from being fully tested.\n",
3832 nb_rxq, nb_txq);
3834 init_config();
3836 if (hot_plug) {
3837 ret = rte_dev_hotplug_handle_enable();
3838 if (ret) {
3839 RTE_LOG(ERR, EAL,
3840 "fail to enable hotplug handling.");
3841 return -1;
3842 }
3844 ret = rte_dev_event_monitor_start();
3845 if (ret) {
3846 RTE_LOG(ERR, EAL,
3847 "fail to start device event monitoring.");
3848 return -1;
3849 }
3851 ret = rte_dev_event_callback_register(NULL,
3852 dev_event_callback, NULL);
3853 if (ret) {
3854 RTE_LOG(ERR, EAL,
3855 "fail to register device event callback\n");
3856 return -1;
3857 }
3858 }
3860 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3861 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3863 /* set all ports to promiscuous mode by default */
3864 RTE_ETH_FOREACH_DEV(port_id) {
3865 ret = rte_eth_promiscuous_enable(port_id);
3866 if (ret != 0)
3867 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3868 port_id, rte_strerror(-ret));
3869 }
3871 /* Init metrics library */
3872 rte_metrics_init(rte_socket_id());
3874 #ifdef RTE_LIB_LATENCYSTATS
3875 if (latencystats_enabled != 0) {
3876 int ret = rte_latencystats_init(1, NULL);
3877 if (ret)
3878 printf("Warning: latencystats init() "
3879 "returned error %d\n", ret);
3880 printf("Latencystats running on lcore %d\n",
3881 latencystats_lcore_id);
3882 }
3883 #endif
3885 /* Setup bitrate stats */
3886 #ifdef RTE_LIB_BITRATESTATS
3887 if (bitrate_enabled != 0) {
3888 bitrate_data = rte_stats_bitrate_create();
3889 if (bitrate_data == NULL)
3890 rte_exit(EXIT_FAILURE,
3891 "Could not allocate bitrate data.\n");
3892 rte_stats_bitrate_reg(bitrate_data);
3893 }
3894 #endif
3896 #ifdef RTE_LIB_CMDLINE
3897 if (strlen(cmdline_filename) != 0)
3898 cmdline_read_from_file(cmdline_filename);
3900 if (interactive == 1) {
3901 if (auto_start) {
3902 printf("Start automatic packet forwarding\n");
3903 start_packet_forwarding(0);
3904 }
3905 prompt();
3906 pmd_test_exit();
3907 } else
3908 #endif
3909 {
3910 char c;
3911 int rc;
3913 f_quit = 0;
3915 printf("No commandline core given, start packet forwarding\n");
3916 start_packet_forwarding(tx_first);
3917 if (stats_period != 0) {
3918 uint64_t prev_time = 0, cur_time, diff_time = 0;
3919 uint64_t timer_period;
3921 /* Convert to number of cycles */
3922 timer_period = stats_period * rte_get_timer_hz();
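/*
 * Illustrative arithmetic (assumed figures): with --stats-period=5
 * and a timer frequency of 2.5 GHz, timer_period becomes
 * 5 * 2500000000 = 12500000000 cycles, i.e. the statistics are
 * refreshed every 5 seconds.
 */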
3924 while (f_quit == 0) {
3925 cur_time = rte_get_timer_cycles();
3926 diff_time += cur_time - prev_time;
3928 if (diff_time >= timer_period) {
3929 print_stats();
3930 /* Reset the timer */
3931 diff_time = 0;
3932 }
3933 /* Sleep to avoid unnecessary checks */
3934 prev_time = cur_time;
3935 rte_delay_us_sleep(US_PER_S);
3936 }
3937 }
3939 printf("Press enter to exit\n");
3940 rc = read(0, &c, 1);
3946 ret = rte_eal_cleanup();
3948 rte_exit(EXIT_FAILURE,
3949 "EAL cleanup failed: %s\n", strerror(-ret));
3951 return EXIT_SUCCESS;
3952 }