1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
48 #include <rte_string_fns.h>
50 #include <rte_pmd_ixgbe.h>
53 #include <rte_pdump.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIB_BITRATESTATS
58 #include <rte_bitrate.h>
60 #ifdef RTE_LIB_LATENCYSTATS
61 #include <rte_latencystats.h>
67 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
68 #define HUGE_FLAG (0x40000)
70 #define HUGE_FLAG MAP_HUGETLB
73 #ifndef MAP_HUGE_SHIFT
74 /* older kernels (or FreeBSD) will not have this define */
75 #define HUGE_SHIFT (26)
77 #define HUGE_SHIFT MAP_HUGE_SHIFT
80 #define EXTMEM_HEAP_NAME "extmem"
81 #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* use the main core for the command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, the NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports among the CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
102 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
117 * Store the specified sockets on which the memory pools used by the ports
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123 * Store the specified sockets on which the RX rings used by the ports
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129 * Store the specified sockets on which the TX rings used by the ports
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet address of peer target ports to which packets are
137 * Must be instantiated with the Ethernet addresses of the peer traffic generator
140 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
182 &five_tuple_swap_fwd_engine,
183 #ifdef RTE_LIBRTE_IEEE1588
184 &ieee1588_fwd_engine,
189 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
190 uint16_t mempool_flags;
192 struct fwd_config cur_fwd_config;
193 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
194 uint32_t retry_enabled;
195 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
196 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
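/*
 * Illustrative sketch (not part of testpmd; variable names assumed): with
 * retry enabled, a forwarding engine resends a partially transmitted burst
 * up to burst_tx_retry_num times, pausing burst_tx_delay_time us between
 * attempts:
 *
 *	nb_tx = rte_eth_tx_burst(port, queue, pkts, nb_pkt);
 *	while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
 *		rte_delay_us(burst_tx_delay_time);
 *		nb_tx += rte_eth_tx_burst(port, queue, &pkts[nb_tx],
 *				nb_pkt - nb_tx);
 *	}
 */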
198 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
199 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
200 DEFAULT_MBUF_DATA_SIZE
201 }; /**< Mbuf data space size. */
202 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
203 * specified on command-line. */
204 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
207 * In a container, the process running with the 'stats-period' option cannot
208 * be terminated. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
213 * Configuration of packet segments used to scatter received packets
214 * if any of the split features is configured.
216 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
217 uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
218 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
219 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
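/*
 * Illustrative sketch (assumed values): splitting each received packet into
 * a 64-byte first segment plus the remainder, roughly what the interactive
 * "set rxpkts 64,0" command configures:
 *
 *	rx_pkt_seg_lengths[0] = 64;	// protocol headers
 *	rx_pkt_seg_lengths[1] = 0;	// 0 means "use the mbuf data size"
 *	rx_pkt_nb_segs = 2;
 */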
222 * Configuration of packet segments used by the "txonly" processing engine.
224 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
225 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
226 TXONLY_DEF_PACKET_LEN,
228 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
230 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
231 /**< Split policy for packets to TX. */
233 uint8_t txonly_multi_flow;
234 /**< Whether multiple flows are generated in TXONLY mode. */
236 uint32_t tx_pkt_times_inter;
237 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
239 uint32_t tx_pkt_times_intra;
240 /**< Timings for send scheduling in TXONLY mode, time between packets. */
242 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
243 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
245 /* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
246 uint8_t dcb_config = 0;
248 /* Whether DCB is in testing status */
249 uint8_t dcb_test = 0;
252 * Configurable number of RX/TX queues.
254 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
255 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
256 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
259 * Configurable number of RX/TX ring descriptors.
260 * Defaults are supplied by drivers via ethdev.
262 #define RTE_TEST_RX_DESC_DEFAULT 0
263 #define RTE_TEST_TX_DESC_DEFAULT 0
264 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
265 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
267 #define RTE_PMD_PARAM_UNSET -1
269 * Configurable values of RX and TX ring threshold registers.
272 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
273 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
274 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
276 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
277 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
278 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
281 * Configurable value of RX free threshold.
283 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
286 * Configurable value of RX drop enable.
288 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
291 * Configurable value of TX free threshold.
293 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
296 * Configurable value of TX RS bit threshold.
298 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
301 * Configurable value of buffered packets before sending.
303 uint16_t noisy_tx_sw_bufsz;
306 * Configurable value of packet buffer timeout.
308 uint16_t noisy_tx_sw_buf_flush_time;
311 * Configurable value for size of VNF internal memory area
312 * used for simulating noisy neighbour behaviour
314 uint64_t noisy_lkup_mem_sz;
317 * Configurable value of number of random writes done in
318 * VNF simulation memory area.
320 uint64_t noisy_lkup_num_writes;
323 * Configurable value of number of random reads done in
324 * VNF simulation memory area.
326 uint64_t noisy_lkup_num_reads;
329 * Configurable value of number of random reads/writes done in
330 * VNF simulation memory area.
332 uint64_t noisy_lkup_num_reads_writes;
335 * Receive Side Scaling (RSS) configuration.
337 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
340 * Port topology configuration
342 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
345 * Avoid flushing all the RX streams before starting forwarding.
347 uint8_t no_flush_rx = 0; /* flush by default */
350 * Flow API isolated mode.
352 uint8_t flow_isolate_all;
355 * Avoid checking the link status when starting/stopping a port.
357 uint8_t no_link_check = 0; /* check by default */
360 * Don't automatically start all ports in interactive mode.
362 uint8_t no_device_start = 0;
365 * Enable link status change notification
367 uint8_t lsc_interrupt = 1; /* enabled by default */
370 * Enable device removal notification.
372 uint8_t rmv_interrupt = 1; /* enabled by default */
374 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
376 /* After attach, port setup is called on event or by iterator */
377 bool setup_on_probe_event = true;
379 /* Clear ptypes on port initialization. */
380 uint8_t clear_ptypes = true;
382 /* Hairpin ports configuration mode. */
383 uint16_t hairpin_mode;
385 /* Pretty printing of ethdev events */
386 static const char * const eth_event_desc[] = {
387 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
388 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
389 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
390 [RTE_ETH_EVENT_INTR_RESET] = "reset",
391 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
392 [RTE_ETH_EVENT_IPSEC] = "IPsec",
393 [RTE_ETH_EVENT_MACSEC] = "MACsec",
394 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
395 [RTE_ETH_EVENT_NEW] = "device probed",
396 [RTE_ETH_EVENT_DESTROY] = "device released",
397 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
398 [RTE_ETH_EVENT_MAX] = NULL,
402 * Display or mask ethdev events.
403 * Defaults to all events except VF_MBOX.
405 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
406 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
407 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
408 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
409 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
410 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
411 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
412 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
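/*
 * Illustrative note: an ethdev event is displayed only if its bit is set in
 * event_print_mask, e.g.:
 *
 *	if (event_print_mask & (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC))
 *		printf("link state change events will be shown\n");
 */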
414 * Decide if all memory is locked for performance.
419 * NIC bypass mode configuration options.
422 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
423 /* The NIC bypass watchdog timeout. */
424 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
428 #ifdef RTE_LIB_LATENCYSTATS
431 * Set when latency stats are enabled on the command line
433 uint8_t latencystats_enabled;
436 * Lcore ID to serve latency statistics.
438 lcoreid_t latencystats_lcore_id = -1;
443 * Ethernet device configuration.
445 struct rte_eth_rxmode rx_mode = {
446 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
447 /**< Default maximum frame length. */
450 struct rte_eth_txmode tx_mode = {
451 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
454 struct rte_fdir_conf fdir_conf = {
455 .mode = RTE_FDIR_MODE_NONE,
456 .pballoc = RTE_FDIR_PBALLOC_64K,
457 .status = RTE_FDIR_REPORT_STATUS,
459 .vlan_tci_mask = 0xEFFF,
461 .src_ip = 0xFFFFFFFF,
462 .dst_ip = 0xFFFFFFFF,
465 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
466 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
468 .src_port_mask = 0xFFFF,
469 .dst_port_mask = 0xFFFF,
470 .mac_addr_byte_mask = 0xFF,
471 .tunnel_type_mask = 1,
472 .tunnel_id_mask = 0xFFFFFFFF,
477 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
480 * Display zero values by default for xstats
482 uint8_t xstats_hide_zero;
485 * Measurement of CPU cycles disabled by default
487 uint8_t record_core_cycles;
490 * Display of RX and TX bursts disabled by default
492 uint8_t record_burst_stats;
494 unsigned int num_sockets = 0;
495 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
497 #ifdef RTE_LIB_BITRATESTATS
498 /* Bitrate statistics */
499 struct rte_stats_bitrates *bitrate_data;
500 lcoreid_t bitrate_lcore_id;
501 uint8_t bitrate_enabled;
504 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
505 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
508 * Hexadecimal bitmask of the RX multi-queue modes that can be enabled.
510 enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
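/*
 * Illustrative note: ETH_MQ_RX_VMDQ_DCB_RSS sets the RSS, DCB and VMDq flag
 * bits, so every RX multi-queue mode is allowed by default; a sketch
 * restricting ports to plain RSS (not part of testpmd) would be:
 *
 *	rx_mq_mode = ETH_MQ_RX_RSS;
 */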
512 /* Forward function declarations */
513 static void setup_attached_port(portid_t pi);
514 static void check_all_ports_link_status(uint32_t port_mask);
515 static int eth_event_callback(portid_t port_id,
516 enum rte_eth_event_type type,
517 void *param, void *ret_param);
518 static void dev_event_callback(const char *device_name,
519 enum rte_dev_event_type type,
523 * Check if all the ports are started.
524 * If yes, return positive value. If not, return zero.
526 static int all_ports_started(void);
528 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
529 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
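/*
 * Note: with RTE_ETHER_MAX_LEN (1518) and RTE_ETHER_CRC_LEN (4) this is
 * 1514 bytes, i.e. a maximum standard Ethernet frame without the CRC.
 */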
531 /* Holds the registered mbuf dynamic flags names. */
532 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
535 * Helper function to check if socket is already discovered.
536 * If it is, return zero. If not, return a positive value.
539 new_socket_id(unsigned int socket_id)
543 for (i = 0; i < num_sockets; i++) {
544 if (socket_ids[i] == socket_id)
551 * Setup default configuration.
554 set_default_fwd_lcores_config(void)
558 unsigned int sock_num;
561 for (i = 0; i < RTE_MAX_LCORE; i++) {
562 if (!rte_lcore_is_enabled(i))
564 sock_num = rte_lcore_to_socket_id(i);
565 if (new_socket_id(sock_num)) {
566 if (num_sockets >= RTE_MAX_NUMA_NODES) {
567 rte_exit(EXIT_FAILURE,
568 "Total sockets greater than %u\n",
571 socket_ids[num_sockets++] = sock_num;
573 if (i == rte_get_main_lcore())
575 fwd_lcores_cpuids[nb_lc++] = i;
577 nb_lcores = (lcoreid_t) nb_lc;
578 nb_cfg_lcores = nb_lcores;
583 set_def_peer_eth_addrs(void)
587 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
588 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
589 peer_eth_addrs[i].addr_bytes[5] = i;
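/*
 * Illustrative note: RTE_ETHER_LOCAL_ADMIN_ADDR is 0x02, so the default
 * peers are 02:00:00:00:00:00 for port 0, 02:00:00:00:00:01 for port 1,
 * and so on.
 */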
594 set_default_fwd_ports_config(void)
599 RTE_ETH_FOREACH_DEV(pt_id) {
600 fwd_ports_ids[i++] = pt_id;
602 /* Update sockets info according to the attached device */
603 int socket_id = rte_eth_dev_socket_id(pt_id);
604 if (socket_id >= 0 && new_socket_id(socket_id)) {
605 if (num_sockets >= RTE_MAX_NUMA_NODES) {
606 rte_exit(EXIT_FAILURE,
607 "Total sockets greater than %u\n",
610 socket_ids[num_sockets++] = socket_id;
614 nb_cfg_ports = nb_ports;
615 nb_fwd_ports = nb_ports;
619 set_def_fwd_config(void)
621 set_default_fwd_lcores_config();
622 set_def_peer_eth_addrs();
623 set_default_fwd_ports_config();
626 /* extremely pessimistic estimation of memory required to create a mempool */
628 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
630 unsigned int n_pages, mbuf_per_pg, leftover;
631 uint64_t total_mem, mbuf_mem, obj_sz;
633 /* there is no good way to predict how much space the mempool will
634 * occupy because it will allocate chunks on the fly, and some of those
635 * will come from default DPDK memory while some will come from our
636 * external memory, so just assume 128MB will be enough for everyone.
638 uint64_t hdr_mem = 128 << 20;
640 /* account for possible non-contiguousness */
641 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
643 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
647 mbuf_per_pg = pgsz / obj_sz;
648 leftover = (nb_mbufs % mbuf_per_pg) > 0;
649 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
651 mbuf_mem = n_pages * pgsz;
653 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
655 if (total_mem > SIZE_MAX) {
656 TESTPMD_LOG(ERR, "Memory size too big\n");
659 *out = (size_t)total_mem;
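/*
 * Worked example (assumed object size): with nb_mbufs = 8192, an obj_sz of
 * 2176 bytes and 2 MiB pages, mbuf_per_pg = 2097152 / 2176 = 963, so
 * n_pages = ceil(8192 / 963) = 9 and mbuf_mem = 18 MiB; adding the 128 MiB
 * header allowance gives *out = 146 MiB.
 */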
665 pagesz_flags(uint64_t page_sz)
667 /* as per the mmap() manpage, huge page sizes are encoded as the log2 of
668 * the page size, shifted by MAP_HUGE_SHIFT
670 int log2 = rte_log2_u64(page_sz);
672 return (log2 << HUGE_SHIFT);
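/*
 * Worked example: for 2 MiB pages, rte_log2_u64(2097152) = 21, so this
 * returns 21 << 26, matching the kernel's MAP_HUGE_2MB encoding.
 */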
676 alloc_mem(size_t memsz, size_t pgsz, bool huge)
681 /* allocate anonymous memory, optionally backed by hugepages */
682 flags = MAP_ANONYMOUS | MAP_PRIVATE;
684 flags |= HUGE_FLAG | pagesz_flags(pgsz);
686 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
687 if (addr == MAP_FAILED)
693 struct extmem_param {
697 rte_iova_t *iova_table;
698 unsigned int iova_table_len;
702 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
705 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
706 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
707 unsigned int cur_page, n_pages, pgsz_idx;
708 size_t mem_sz, cur_pgsz;
709 rte_iova_t *iovas = NULL;
713 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
714 /* skip anything that is too big */
715 if (pgsizes[pgsz_idx] > SIZE_MAX)
718 cur_pgsz = pgsizes[pgsz_idx];
720 /* if we were told not to allocate hugepages, override */
722 cur_pgsz = sysconf(_SC_PAGESIZE);
724 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
726 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
730 /* allocate our memory */
731 addr = alloc_mem(mem_sz, cur_pgsz, huge);
733 /* if we couldn't allocate memory with a specified page size,
734 * that doesn't mean we can't do it with other page sizes, so
740 /* store IOVA addresses for every page in this memory area */
741 n_pages = mem_sz / cur_pgsz;
743 iovas = malloc(sizeof(*iovas) * n_pages);
746 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
749 /* lock memory if it's not huge pages */
753 /* populate IOVA addresses */
754 for (cur_page = 0; cur_page < n_pages; cur_page++) {
759 offset = cur_pgsz * cur_page;
760 cur = RTE_PTR_ADD(addr, offset);
762 /* touch the page before getting its IOVA */
763 *(volatile char *)cur = 0;
765 iova = rte_mem_virt2iova(cur);
767 iovas[cur_page] = iova;
772 /* if we couldn't allocate anything */
778 param->pgsz = cur_pgsz;
779 param->iova_table = iovas;
780 param->iova_table_len = n_pages;
787 munmap(addr, mem_sz);
793 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
795 struct extmem_param param;
798 memset(&param, 0, sizeof(param));
800 /* check if our heap exists */
801 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
803 /* create our heap */
804 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
806 TESTPMD_LOG(ERR, "Cannot create heap\n");
811 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
813 TESTPMD_LOG(ERR, "Cannot create memory area\n");
817 /* we now have a valid memory area, so add it to heap */
818 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
819 param.addr, param.len, param.iova_table,
820 param.iova_table_len, param.pgsz);
822 /* when using VFIO, memory is automatically mapped for DMA by EAL */
824 /* not needed any more */
825 free(param.iova_table);
828 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
829 munmap(param.addr, param.len);
835 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
841 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
842 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
847 RTE_ETH_FOREACH_DEV(pid) {
848 struct rte_eth_dev *dev =
849 &rte_eth_devices[pid];
851 ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
855 "unable to DMA unmap addr 0x%p "
857 memhdr->addr, dev->data->name);
860 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
863 "unable to un-register addr 0x%p\n", memhdr->addr);
868 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
869 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
872 size_t page_size = sysconf(_SC_PAGESIZE);
875 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
879 "unable to register addr 0x%p\n", memhdr->addr);
882 RTE_ETH_FOREACH_DEV(pid) {
883 struct rte_eth_dev *dev =
884 &rte_eth_devices[pid];
886 ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
890 "unable to DMA map addr 0x%p "
892 memhdr->addr, dev->data->name);
898 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
899 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
901 struct rte_pktmbuf_extmem *xmem;
902 unsigned int ext_num, zone_num, elt_num;
905 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
906 elt_num = EXTBUF_ZONE_SIZE / elt_size;
907 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
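/*
 * Worked example (assumed values): for mbuf_sz = 2176 the cache-line
 * aligned elt_size stays 2176, elt_num = 2 MiB / 2176 = 963 buffers per
 * zone, and nb_mbufs = 8192 therefore needs zone_num = ceil(8192 / 963)
 * = 9 memzones.
 */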
909 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
911 TESTPMD_LOG(ERR, "Cannot allocate memory for "
912 "external buffer descriptors\n");
916 for (ext_num = 0; ext_num < zone_num; ext_num++) {
917 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
918 const struct rte_memzone *mz;
919 char mz_name[RTE_MEMZONE_NAMESIZE];
922 ret = snprintf(mz_name, sizeof(mz_name),
923 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
924 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
925 errno = ENAMETOOLONG;
929 mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
931 RTE_MEMZONE_IOVA_CONTIG |
933 RTE_MEMZONE_SIZE_HINT_ONLY,
937 * The caller exits on external buffer creation
938 * error, so there is no need to free memzones.
944 xseg->buf_ptr = mz->addr;
945 xseg->buf_iova = mz->iova;
946 xseg->buf_len = EXTBUF_ZONE_SIZE;
947 xseg->elt_size = elt_size;
949 if (ext_num == 0 && xmem != NULL) {
958 * Configuration initialisation done once at init time.
960 static struct rte_mempool *
961 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
962 unsigned int socket_id, uint16_t size_idx)
964 char pool_name[RTE_MEMPOOL_NAMESIZE];
965 struct rte_mempool *rte_mp = NULL;
968 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
969 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
972 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
973 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
975 switch (mp_alloc_type) {
976 case MP_ALLOC_NATIVE:
978 /* wrapper to rte_mempool_create() */
979 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
980 rte_mbuf_best_mempool_ops());
981 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
982 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
987 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
988 mb_size, (unsigned int) mb_mempool_cache,
989 sizeof(struct rte_pktmbuf_pool_private),
990 socket_id, mempool_flags);
994 if (rte_mempool_populate_anon(rte_mp) == 0) {
995 rte_mempool_free(rte_mp);
999 rte_pktmbuf_pool_init(rte_mp, NULL);
1000 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1001 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1005 case MP_ALLOC_XMEM_HUGE:
1008 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1010 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1011 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1014 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1015 if (heap_socket < 0)
1016 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1018 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1019 rte_mbuf_best_mempool_ops());
1020 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1021 mb_mempool_cache, 0, mbuf_seg_size,
1027 struct rte_pktmbuf_extmem *ext_mem;
1028 unsigned int ext_num;
1030 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1031 socket_id, pool_name, &ext_mem);
1033 rte_exit(EXIT_FAILURE,
1034 "Can't create pinned data buffers\n");
1036 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1037 rte_mbuf_best_mempool_ops());
1038 rte_mp = rte_pktmbuf_pool_create_extbuf
1039 (pool_name, nb_mbuf, mb_mempool_cache,
1040 0, mbuf_seg_size, socket_id,
1047 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1052 if (rte_mp == NULL) {
1053 rte_exit(EXIT_FAILURE,
1054 "Creation of mbuf pool for socket %u failed: %s\n",
1055 socket_id, rte_strerror(rte_errno));
1056 } else if (verbose_level > 0) {
1057 rte_mempool_dump(stdout, rte_mp);
1063 * Check whether the given socket id is valid in NUMA mode.
1064 * If valid, return 0, else return -1.
1067 check_socket_id(const unsigned int socket_id)
1069 static int warning_once = 0;
1071 if (new_socket_id(socket_id)) {
1072 if (!warning_once && numa_support)
1073 printf("Warning: NUMA should be configured manually by"
1074 " using --port-numa-config and"
1075 " --ring-numa-config parameters along with"
1084 * Get the allowed maximum number of RX queues.
1085 * *pid returns the port id which has the minimal value of
1086 * max_rx_queues among all ports.
1089 get_allowed_max_nb_rxq(portid_t *pid)
1091 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1092 bool max_rxq_valid = false;
1094 struct rte_eth_dev_info dev_info;
1096 RTE_ETH_FOREACH_DEV(pi) {
1097 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1100 max_rxq_valid = true;
1101 if (dev_info.max_rx_queues < allowed_max_rxq) {
1102 allowed_max_rxq = dev_info.max_rx_queues;
1106 return max_rxq_valid ? allowed_max_rxq : 0;
1110 * Check whether the input rxq is valid.
1111 * If the input rxq is not greater than the maximum number
1112 * of RX queues of any port, it is valid.
1113 * If valid, return 0, else return -1.
1116 check_nb_rxq(queueid_t rxq)
1118 queueid_t allowed_max_rxq;
1121 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1122 if (rxq > allowed_max_rxq) {
1123 printf("Fail: input rxq (%u) can't be greater "
1124 "than max_rx_queues (%u) of port %u\n",
1134 * Get the allowed maximum number of TX queues.
1135 * *pid returns the port id which has the minimal value of
1136 * max_tx_queues among all ports.
1139 get_allowed_max_nb_txq(portid_t *pid)
1141 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1142 bool max_txq_valid = false;
1144 struct rte_eth_dev_info dev_info;
1146 RTE_ETH_FOREACH_DEV(pi) {
1147 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1150 max_txq_valid = true;
1151 if (dev_info.max_tx_queues < allowed_max_txq) {
1152 allowed_max_txq = dev_info.max_tx_queues;
1156 return max_txq_valid ? allowed_max_txq : 0;
1160 * Check whether the input txq is valid.
1161 * If the input txq is not greater than the maximum number
1162 * of TX queues of any port, it is valid.
1163 * If valid, return 0, else return -1.
1166 check_nb_txq(queueid_t txq)
1168 queueid_t allowed_max_txq;
1171 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1172 if (txq > allowed_max_txq) {
1173 printf("Fail: input txq (%u) can't be greater "
1174 "than max_tx_queues (%u) of port %u\n",
1184 * Get the allowed maximum number of RXDs of every rx queue.
1185 * *pid returns the port id which has the minimal value of
1186 * max_rxd among all queues of all ports.
1189 get_allowed_max_nb_rxd(portid_t *pid)
1191 uint16_t allowed_max_rxd = UINT16_MAX;
1193 struct rte_eth_dev_info dev_info;
1195 RTE_ETH_FOREACH_DEV(pi) {
1196 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1199 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1200 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1204 return allowed_max_rxd;
1208 * Get the allowed minimal number of RXDs of every rx queue.
1209 * *pid returns the port id which has the highest value of
1210 * min_rxd among all queues of all ports.
1213 get_allowed_min_nb_rxd(portid_t *pid)
1215 uint16_t allowed_min_rxd = 0;
1217 struct rte_eth_dev_info dev_info;
1219 RTE_ETH_FOREACH_DEV(pi) {
1220 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1223 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1224 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1229 return allowed_min_rxd;
1233 * Check whether the input rxd is valid.
1234 * If the input rxd is not greater than the maximum number
1235 * of RXDs of every Rx queue and is not less than the
1236 * minimal number of RXDs of every Rx queue, it is valid.
1237 * If valid, return 0, else return -1.
1240 check_nb_rxd(queueid_t rxd)
1242 uint16_t allowed_max_rxd;
1243 uint16_t allowed_min_rxd;
1246 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1247 if (rxd > allowed_max_rxd) {
1248 printf("Fail: input rxd (%u) can't be greater "
1249 "than max_rxds (%u) of port %u\n",
1256 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1257 if (rxd < allowed_min_rxd) {
1258 printf("Fail: input rxd (%u) can't be less "
1259 "than min_rxds (%u) of port %u\n",
1270 * Get the allowed maximum number of TXDs of every tx queue.
1271 * *pid returns the port id which has the minimal value of
1272 * max_txd among all tx queues.
1275 get_allowed_max_nb_txd(portid_t *pid)
1277 uint16_t allowed_max_txd = UINT16_MAX;
1279 struct rte_eth_dev_info dev_info;
1281 RTE_ETH_FOREACH_DEV(pi) {
1282 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1285 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1286 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1290 return allowed_max_txd;
1294 * Get the allowed minimal number of TXDs of every tx queue.
1295 * *pid returns the port id which has the highest value of
1296 * min_txd among all tx queues.
1299 get_allowed_min_nb_txd(portid_t *pid)
1301 uint16_t allowed_min_txd = 0;
1303 struct rte_eth_dev_info dev_info;
1305 RTE_ETH_FOREACH_DEV(pi) {
1306 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1309 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1310 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1315 return allowed_min_txd;
1319 * Check whether the input txd is valid.
1320 * If the input txd is not greater than the maximum number
1321 * of TXDs of every Tx queue and is not less than the
1322 * minimal number, it is valid. If valid, return 0, else return -1.
1325 check_nb_txd(queueid_t txd)
1327 uint16_t allowed_max_txd;
1328 uint16_t allowed_min_txd;
1331 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1332 if (txd > allowed_max_txd) {
1333 printf("Fail: input txd (%u) can't be greater "
1334 "than max_txds (%u) of port %u\n",
1341 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1342 if (txd < allowed_min_txd) {
1343 printf("Fail: input txd (%u) can't be less "
1344 "than min_txds (%u) of port %u\n",
1355 * Get the allowed maximum number of hairpin queues.
1356 * *pid returns the port id which has the minimal value of
1357 * max_hairpin_queues among all ports.
1360 get_allowed_max_nb_hairpinq(portid_t *pid)
1362 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1364 struct rte_eth_hairpin_cap cap;
1366 RTE_ETH_FOREACH_DEV(pi) {
1367 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1371 if (cap.max_nb_queues < allowed_max_hairpinq) {
1372 allowed_max_hairpinq = cap.max_nb_queues;
1376 return allowed_max_hairpinq;
1380 * Check whether the input hairpin queue number is valid.
1381 * If it is not greater than the maximum number of hairpin
1382 * queues of any port, it is valid.
1383 * If valid, return 0, else return -1.
1386 check_nb_hairpinq(queueid_t hairpinq)
1388 queueid_t allowed_max_hairpinq;
1391 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1392 if (hairpinq > allowed_max_hairpinq) {
1393 printf("Fail: input hairpin (%u) can't be greater "
1394 "than max_hairpin_queues (%u) of port %u\n",
1395 hairpinq, allowed_max_hairpinq, pid);
1405 struct rte_port *port;
1406 struct rte_mempool *mbp;
1407 unsigned int nb_mbuf_per_pool;
1409 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1410 struct rte_gro_param gro_param;
1417 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1419 /* Configuration of logical cores. */
1420 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1421 sizeof(struct fwd_lcore *) * nb_lcores,
1422 RTE_CACHE_LINE_SIZE);
1423 if (fwd_lcores == NULL) {
1424 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1425 "failed\n", nb_lcores);
1427 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1428 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1429 sizeof(struct fwd_lcore),
1430 RTE_CACHE_LINE_SIZE);
1431 if (fwd_lcores[lc_id] == NULL) {
1432 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1435 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1438 RTE_ETH_FOREACH_DEV(pid) {
1440 /* Apply default TxRx configuration for all ports */
1441 port->dev_conf.txmode = tx_mode;
1442 port->dev_conf.rxmode = rx_mode;
1444 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1446 rte_exit(EXIT_FAILURE,
1447 "rte_eth_dev_info_get() failed\n");
1449 if (!(port->dev_info.tx_offload_capa &
1450 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1451 port->dev_conf.txmode.offloads &=
1452 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1454 if (port_numa[pid] != NUMA_NO_CONFIG)
1455 port_per_socket[port_numa[pid]]++;
1457 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1460 * if socket_id is invalid,
1461 * set to the first available socket.
1463 if (check_socket_id(socket_id) < 0)
1464 socket_id = socket_ids[0];
1465 port_per_socket[socket_id]++;
1469 /* Apply Rx offloads configuration */
1470 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1471 port->rx_conf[k].offloads =
1472 port->dev_conf.rxmode.offloads;
1473 /* Apply Tx offloads configuration */
1474 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1475 port->tx_conf[k].offloads =
1476 port->dev_conf.txmode.offloads;
1478 /* set flag to initialize port/queue */
1479 port->need_reconfig = 1;
1480 port->need_reconfig_queues = 1;
1481 port->tx_metadata = 0;
1483 /* Check for maximum number of segments per MTU. Accordingly
1484 * update the mbuf data size.
1486 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1487 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1488 data_size = rx_mode.max_rx_pkt_len /
1489 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1491 if ((data_size + RTE_PKTMBUF_HEADROOM) >
1492 mbuf_data_size[0]) {
1493 mbuf_data_size[0] = data_size +
1494 RTE_PKTMBUF_HEADROOM;
1501 TESTPMD_LOG(WARNING,
1502 "Configured mbuf size of the first segment %hu\n",
1505 * Create pools of mbuf.
1506 * If NUMA support is disabled, create a single pool of mbuf in
1507 * socket 0 memory by default.
1508 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1510 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1511 * nb_txd can be configured at run time.
1513 if (param_total_num_mbufs)
1514 nb_mbuf_per_pool = param_total_num_mbufs;
1516 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1517 (nb_lcores * mb_mempool_cache) +
1518 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1519 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1525 for (i = 0; i < num_sockets; i++)
1526 for (j = 0; j < mbuf_data_size_n; j++)
1527 mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1528 mbuf_pool_create(mbuf_data_size[j],
1534 for (i = 0; i < mbuf_data_size_n; i++)
1535 mempools[i] = mbuf_pool_create
1538 socket_num == UMA_NO_CONFIG ?
1544 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1545 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1547 * Record which mbuf pool each logical core should use, if needed.
1549 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1550 mbp = mbuf_pool_find(
1551 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1554 mbp = mbuf_pool_find(0, 0);
1555 fwd_lcores[lc_id]->mbp = mbp;
1556 /* initialize GSO context */
1557 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1558 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1559 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1560 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1562 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1565 /* Configuration of packet forwarding streams. */
1566 if (init_fwd_streams() < 0)
1567 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1571 /* create a gro context for each lcore */
1572 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1573 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1574 gro_param.max_item_per_flow = MAX_PKT_BURST;
1575 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1576 gro_param.socket_id = rte_lcore_to_socket_id(
1577 fwd_lcores_cpuids[lc_id]);
1578 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1579 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1580 rte_exit(EXIT_FAILURE,
1581 "rte_gro_ctx_create() failed\n");
1588 reconfig(portid_t new_port_id, unsigned socket_id)
1590 struct rte_port *port;
1593 /* Reconfiguration of Ethernet ports. */
1594 port = &ports[new_port_id];
1596 ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
1600 /* set flag to initialize port/queue */
1601 port->need_reconfig = 1;
1602 port->need_reconfig_queues = 1;
1603 port->socket_id = socket_id;
1610 init_fwd_streams(void)
1613 struct rte_port *port;
1614 streamid_t sm_id, nb_fwd_streams_new;
1617 /* set the socket id according to NUMA support or not */
1618 RTE_ETH_FOREACH_DEV(pid) {
1620 if (nb_rxq > port->dev_info.max_rx_queues) {
1621 printf("Fail: nb_rxq(%d) is greater than "
1622 "max_rx_queues(%d)\n", nb_rxq,
1623 port->dev_info.max_rx_queues);
1626 if (nb_txq > port->dev_info.max_tx_queues) {
1627 printf("Fail: nb_txq(%d) is greater than "
1628 "max_tx_queues(%d)\n", nb_txq,
1629 port->dev_info.max_tx_queues);
1633 if (port_numa[pid] != NUMA_NO_CONFIG)
1634 port->socket_id = port_numa[pid];
1636 port->socket_id = rte_eth_dev_socket_id(pid);
1639 * if socket_id is invalid,
1640 * set to the first available socket.
1642 if (check_socket_id(port->socket_id) < 0)
1643 port->socket_id = socket_ids[0];
1647 if (socket_num == UMA_NO_CONFIG)
1648 port->socket_id = 0;
1650 port->socket_id = socket_num;
1654 q = RTE_MAX(nb_rxq, nb_txq);
1656 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1659 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1660 if (nb_fwd_streams_new == nb_fwd_streams)
1663 if (fwd_streams != NULL) {
1664 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1665 if (fwd_streams[sm_id] == NULL)
1667 rte_free(fwd_streams[sm_id]);
1668 fwd_streams[sm_id] = NULL;
1670 rte_free(fwd_streams);
1675 nb_fwd_streams = nb_fwd_streams_new;
1676 if (nb_fwd_streams) {
1677 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1678 sizeof(struct fwd_stream *) * nb_fwd_streams,
1679 RTE_CACHE_LINE_SIZE);
1680 if (fwd_streams == NULL)
1681 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1682 " (struct fwd_stream *)) failed\n",
1685 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1686 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1687 " struct fwd_stream", sizeof(struct fwd_stream),
1688 RTE_CACHE_LINE_SIZE);
1689 if (fwd_streams[sm_id] == NULL)
1690 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1691 "(struct fwd_stream) failed\n");
1699 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1701 uint64_t total_burst, sburst;
1703 uint64_t burst_stats[4];
1704 uint16_t pktnb_stats[4];
1706 int burst_percent[4], sburstp;
1710 * First compute the total number of packet bursts and the
1711 * two highest numbers of bursts of the same number of packets.
1713 memset(&burst_stats, 0x0, sizeof(burst_stats));
1714 memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1716 /* Show stats for 0 burst size always */
1717 total_burst = pbs->pkt_burst_spread[0];
1718 burst_stats[0] = pbs->pkt_burst_spread[0];
1721 /* Find the next 2 burst sizes with highest occurrences. */
1722 for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1723 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1728 total_burst += nb_burst;
1730 if (nb_burst > burst_stats[1]) {
1731 burst_stats[2] = burst_stats[1];
1732 pktnb_stats[2] = pktnb_stats[1];
1733 burst_stats[1] = nb_burst;
1734 pktnb_stats[1] = nb_pkt;
1735 } else if (nb_burst > burst_stats[2]) {
1736 burst_stats[2] = nb_burst;
1737 pktnb_stats[2] = nb_pkt;
1740 if (total_burst == 0)
1743 printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1744 for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1746 printf("%d%% of other]\n", 100 - sburstp);
1750 sburst += burst_stats[i];
1751 if (sburst == total_burst) {
1752 printf("%d%% of %d pkts]\n",
1753 100 - sburstp, (int) pktnb_stats[i]);
1758 (double)burst_stats[i] / total_burst * 100;
1759 printf("%d%% of %d pkts + ",
1760 burst_percent[i], (int) pktnb_stats[i]);
1761 sburstp += burst_percent[i];
1766 fwd_stream_stats_display(streamid_t stream_id)
1768 struct fwd_stream *fs;
1769 static const char *fwd_top_stats_border = "-------";
1771 fs = fwd_streams[stream_id];
1772 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1773 (fs->fwd_dropped == 0))
1775 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1776 "TX Port=%2d/Queue=%2d %s\n",
1777 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1778 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1779 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1780 " TX-dropped: %-14"PRIu64,
1781 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1783 /* if checksum mode */
1784 if (cur_fwd_eng == &csum_fwd_engine) {
1785 printf(" RX- bad IP checksum: %-14"PRIu64
1786 " Rx- bad L4 checksum: %-14"PRIu64
1787 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1788 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1789 fs->rx_bad_outer_l4_csum);
1794 if (record_burst_stats) {
1795 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1796 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1801 fwd_stats_display(void)
1803 static const char *fwd_stats_border = "----------------------";
1804 static const char *acc_stats_border = "+++++++++++++++";
1806 struct fwd_stream *rx_stream;
1807 struct fwd_stream *tx_stream;
1808 uint64_t tx_dropped;
1809 uint64_t rx_bad_ip_csum;
1810 uint64_t rx_bad_l4_csum;
1811 uint64_t rx_bad_outer_l4_csum;
1812 } ports_stats[RTE_MAX_ETHPORTS];
1813 uint64_t total_rx_dropped = 0;
1814 uint64_t total_tx_dropped = 0;
1815 uint64_t total_rx_nombuf = 0;
1816 struct rte_eth_stats stats;
1817 uint64_t fwd_cycles = 0;
1818 uint64_t total_recv = 0;
1819 uint64_t total_xmit = 0;
1820 struct rte_port *port;
1825 memset(ports_stats, 0, sizeof(ports_stats));
1827 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1828 struct fwd_stream *fs = fwd_streams[sm_id];
1830 if (cur_fwd_config.nb_fwd_streams >
1831 cur_fwd_config.nb_fwd_ports) {
1832 fwd_stream_stats_display(sm_id);
1834 ports_stats[fs->tx_port].tx_stream = fs;
1835 ports_stats[fs->rx_port].rx_stream = fs;
1838 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1840 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1841 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1842 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1843 fs->rx_bad_outer_l4_csum;
1845 if (record_core_cycles)
1846 fwd_cycles += fs->core_cycles;
1848 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1849 pt_id = fwd_ports_ids[i];
1850 port = &ports[pt_id];
1852 rte_eth_stats_get(pt_id, &stats);
1853 stats.ipackets -= port->stats.ipackets;
1854 stats.opackets -= port->stats.opackets;
1855 stats.ibytes -= port->stats.ibytes;
1856 stats.obytes -= port->stats.obytes;
1857 stats.imissed -= port->stats.imissed;
1858 stats.oerrors -= port->stats.oerrors;
1859 stats.rx_nombuf -= port->stats.rx_nombuf;
1861 total_recv += stats.ipackets;
1862 total_xmit += stats.opackets;
1863 total_rx_dropped += stats.imissed;
1864 total_tx_dropped += ports_stats[pt_id].tx_dropped;
1865 total_tx_dropped += stats.oerrors;
1866 total_rx_nombuf += stats.rx_nombuf;
1868 printf("\n %s Forward statistics for port %-2d %s\n",
1869 fwd_stats_border, pt_id, fwd_stats_border);
1871 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
1872 "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
1873 stats.ipackets + stats.imissed);
1875 if (cur_fwd_eng == &csum_fwd_engine)
1876 printf(" Bad-ipcsum: %-14"PRIu64
1877 " Bad-l4csum: %-14"PRIu64
1878 "Bad-outer-l4csum: %-14"PRIu64"\n",
1879 ports_stats[pt_id].rx_bad_ip_csum,
1880 ports_stats[pt_id].rx_bad_l4_csum,
1881 ports_stats[pt_id].rx_bad_outer_l4_csum);
1882 if (stats.ierrors + stats.rx_nombuf > 0) {
1883 printf(" RX-error: %-"PRIu64"\n", stats.ierrors);
1884 printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
1887 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
1888 "TX-total: %-"PRIu64"\n",
1889 stats.opackets, ports_stats[pt_id].tx_dropped,
1890 stats.opackets + ports_stats[pt_id].tx_dropped);
1892 if (record_burst_stats) {
1893 if (ports_stats[pt_id].rx_stream)
1894 pkt_burst_stats_display("RX",
1895 &ports_stats[pt_id].rx_stream->rx_burst_stats);
1896 if (ports_stats[pt_id].tx_stream)
1897 pkt_burst_stats_display("TX",
1898 &ports_stats[pt_id].tx_stream->tx_burst_stats);
1901 printf(" %s--------------------------------%s\n",
1902 fwd_stats_border, fwd_stats_border);
1905 printf("\n %s Accumulated forward statistics for all ports"
1907 acc_stats_border, acc_stats_border);
1908 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1910 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1912 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1913 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1914 if (total_rx_nombuf > 0)
1915 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1916 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1918 acc_stats_border, acc_stats_border);
1919 if (record_core_cycles) {
1920 #define CYC_PER_MHZ 1E6
1921 if (total_recv > 0 || total_xmit > 0) {
1922 uint64_t total_pkts = 0;
1923 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
1924 strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
1925 total_pkts = total_xmit;
1927 total_pkts = total_recv;
1929 printf("\n CPU cycles/packet=%.2F (total cycles="
1930 "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
1932 (double) fwd_cycles / total_pkts,
1933 fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
1934 (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
1940 fwd_stats_reset(void)
1946 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1947 pt_id = fwd_ports_ids[i];
1948 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
1950 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1951 struct fwd_stream *fs = fwd_streams[sm_id];
1955 fs->fwd_dropped = 0;
1956 fs->rx_bad_ip_csum = 0;
1957 fs->rx_bad_l4_csum = 0;
1958 fs->rx_bad_outer_l4_csum = 0;
1960 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
1961 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
1962 fs->core_cycles = 0;
1967 flush_fwd_rx_queues(void)
1969 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1976 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1977 uint64_t timer_period;
1979 /* convert to number of cycles */
1980 timer_period = rte_get_timer_hz(); /* 1 second timeout */
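/*
 * Illustrative note: rte_get_timer_hz() returns timer cycles per second,
 * so on a 2.5 GHz TSC-based timer this sets timer_period to 2500000000
 * cycles, i.e. a one-second timeout.
 */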
1982 for (j = 0; j < 2; j++) {
1983 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1984 for (rxq = 0; rxq < nb_rxq; rxq++) {
1985 port_id = fwd_ports_ids[rxp];
1987 * testpmd can get stuck in the do-while loop below
1988 * if rte_eth_rx_burst() always returns a nonzero
1989 * number of packets. A timer is added to exit this
1990 * loop after the 1-second timer expires.
1992 prev_tsc = rte_rdtsc();
1994 nb_rx = rte_eth_rx_burst(port_id, rxq,
1995 pkts_burst, MAX_PKT_BURST);
1996 for (i = 0; i < nb_rx; i++)
1997 rte_pktmbuf_free(pkts_burst[i]);
1999 cur_tsc = rte_rdtsc();
2000 diff_tsc = cur_tsc - prev_tsc;
2001 timer_tsc += diff_tsc;
2002 } while ((nb_rx > 0) &&
2003 (timer_tsc < timer_period));
2007 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2012 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2014 struct fwd_stream **fsm;
2017 #ifdef RTE_LIB_BITRATESTATS
2018 uint64_t tics_per_1sec;
2019 uint64_t tics_datum;
2020 uint64_t tics_current;
2021 uint16_t i, cnt_ports;
2023 cnt_ports = nb_ports;
2024 tics_datum = rte_rdtsc();
2025 tics_per_1sec = rte_get_timer_hz();
2027 fsm = &fwd_streams[fc->stream_idx];
2028 nb_fs = fc->stream_nb;
2030 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2031 (*pkt_fwd)(fsm[sm_id]);
2032 #ifdef RTE_LIB_BITRATESTATS
2033 if (bitrate_enabled != 0 &&
2034 bitrate_lcore_id == rte_lcore_id()) {
2035 tics_current = rte_rdtsc();
2036 if (tics_current - tics_datum >= tics_per_1sec) {
2037 /* Periodic bitrate calculation */
2038 for (i = 0; i < cnt_ports; i++)
2039 rte_stats_bitrate_calc(bitrate_data,
2041 tics_datum = tics_current;
2045 #ifdef RTE_LIB_LATENCYSTATS
2046 if (latencystats_enabled != 0 &&
2047 latencystats_lcore_id == rte_lcore_id())
2048 rte_latencystats_update();
2051 } while (!fc->stopped);
2055 start_pkt_forward_on_core(void *fwd_arg)
2057 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2058 cur_fwd_config.fwd_eng->packet_fwd);
2063 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2064 * Used to start communication flows in network loopback test configurations.
2067 run_one_txonly_burst_on_core(void *fwd_arg)
2069 struct fwd_lcore *fwd_lc;
2070 struct fwd_lcore tmp_lcore;
2072 fwd_lc = (struct fwd_lcore *) fwd_arg;
2073 tmp_lcore = *fwd_lc;
2074 tmp_lcore.stopped = 1;
2075 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2080 * Launch packet forwarding:
2081 * - Setup per-port forwarding context.
2082 * - launch logical cores with their forwarding configuration.
2085 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2087 port_fwd_begin_t port_fwd_begin;
2092 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2093 if (port_fwd_begin != NULL) {
2094 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2095 (*port_fwd_begin)(fwd_ports_ids[i]);
2097 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2098 lc_id = fwd_lcores_cpuids[i];
2099 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2100 fwd_lcores[i]->stopped = 0;
2101 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2102 fwd_lcores[i], lc_id);
2104 printf("launch lcore %u failed - diag=%d\n",
2111 * Launch packet forwarding configuration.
2114 start_packet_forwarding(int with_tx_first)
2116 port_fwd_begin_t port_fwd_begin;
2117 port_fwd_end_t port_fwd_end;
2118 struct rte_port *port;
2122 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2123 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
2125 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2126 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
2128 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2129 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2130 (!nb_rxq || !nb_txq))
2131 rte_exit(EXIT_FAILURE,
2132 "Either rxq or txq are 0, cannot use %s fwd mode\n",
2133 cur_fwd_eng->fwd_mode_name);
2135 if (all_ports_started() == 0) {
2136 printf("Not all ports were started\n");
2139 if (test_done == 0) {
2140 printf("Packet forwarding already started\n");
2146 for (i = 0; i < nb_fwd_ports; i++) {
2147 pt_id = fwd_ports_ids[i];
2148 port = &ports[pt_id];
2149 if (!port->dcb_flag) {
2150 printf("In DCB mode, all forwarding ports must "
2151 "be configured in this mode.\n");
2155 if (nb_fwd_lcores == 1) {
2156 printf("In DCB mode,the nb forwarding cores "
2157 "should be larger than 1.\n");
2166 flush_fwd_rx_queues();
2168 pkt_fwd_config_display(&cur_fwd_config);
2169 rxtx_config_display();
2172 if (with_tx_first) {
2173 port_fwd_begin = tx_only_engine.port_fwd_begin;
2174 if (port_fwd_begin != NULL) {
2175 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2176 (*port_fwd_begin)(fwd_ports_ids[i]);
2178 while (with_tx_first--) {
2179 launch_packet_forwarding(
2180 run_one_txonly_burst_on_core);
2181 rte_eal_mp_wait_lcore();
2183 port_fwd_end = tx_only_engine.port_fwd_end;
2184 if (port_fwd_end != NULL) {
2185 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2186 (*port_fwd_end)(fwd_ports_ids[i]);
2189 launch_packet_forwarding(start_pkt_forward_on_core);
2193 stop_packet_forwarding(void)
2195 port_fwd_end_t port_fwd_end;
2201 printf("Packet forwarding not started\n");
2204 printf("Telling cores to stop...");
2205 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2206 fwd_lcores[lc_id]->stopped = 1;
2207 printf("\nWaiting for lcores to finish...\n");
2208 rte_eal_mp_wait_lcore();
2209 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2210 if (port_fwd_end != NULL) {
2211 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2212 pt_id = fwd_ports_ids[i];
2213 (*port_fwd_end)(pt_id);
2217 fwd_stats_display();
2219 printf("\nDone.\n");
2224 dev_set_link_up(portid_t pid)
2226 if (rte_eth_dev_set_link_up(pid) < 0)
2227 printf("\nSet link up fail.\n");
2231 dev_set_link_down(portid_t pid)
2233 if (rte_eth_dev_set_link_down(pid) < 0)
2234 printf("\nSet link down fail.\n");
2238 all_ports_started(void)
2241 struct rte_port *port;
2243 RTE_ETH_FOREACH_DEV(pi) {
2245 /* Check if there is a port which is not started */
2246 if ((port->port_status != RTE_PORT_STARTED) &&
2247 (port->slave_flag == 0))
2251 /* All ports are started */
2256 port_is_stopped(portid_t port_id)
2258 struct rte_port *port = &ports[port_id];
2260 if ((port->port_status != RTE_PORT_STOPPED) &&
2261 (port->slave_flag == 0))
2267 all_ports_stopped(void)
2271 RTE_ETH_FOREACH_DEV(pi) {
2272 if (!port_is_stopped(pi))
2280 port_is_started(portid_t port_id)
2282 if (port_id_is_invalid(port_id, ENABLED_WARN))
2285 if (ports[port_id].port_status != RTE_PORT_STARTED)
2291 /* Configure the Rx and Tx hairpin queues for the selected port. */
2293 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2296 struct rte_eth_hairpin_conf hairpin_conf = {
2301 struct rte_port *port = &ports[pi];
2302 uint16_t peer_rx_port = pi;
2303 uint16_t peer_tx_port = pi;
2304 uint32_t manual = 1;
2305 uint32_t tx_exp = hairpin_mode & 0x10;
2307 if (!(hairpin_mode & 0xf)) {
2311 } else if (hairpin_mode & 0x1) {
2312 peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2313 RTE_ETH_DEV_NO_OWNER);
2314 if (peer_tx_port >= RTE_MAX_ETHPORTS)
2315 peer_tx_port = rte_eth_find_next_owned_by(0,
2316 RTE_ETH_DEV_NO_OWNER);
2317 if (p_pi != RTE_MAX_ETHPORTS) {
2318 peer_rx_port = p_pi;
2322 /* Last port will be the peer RX port of the first. */
2323 RTE_ETH_FOREACH_DEV(next_pi)
2324 peer_rx_port = next_pi;
2327 } else if (hairpin_mode & 0x2) {
2329 peer_rx_port = p_pi;
2331 peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2332 RTE_ETH_DEV_NO_OWNER);
2333 if (peer_rx_port >= RTE_MAX_ETHPORTS)
2336 peer_tx_port = peer_rx_port;
2340 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2341 hairpin_conf.peers[0].port = peer_rx_port;
2342 hairpin_conf.peers[0].queue = i + nb_rxq;
2343 hairpin_conf.manual_bind = !!manual;
2344 hairpin_conf.tx_explicit = !!tx_exp;
2345 diag = rte_eth_tx_hairpin_queue_setup
2346 (pi, qi, nb_txd, &hairpin_conf);
2351 /* Failed to set up the Tx hairpin queue, return */
2352 if (rte_atomic16_cmpset(&(port->port_status),
2354 RTE_PORT_STOPPED) == 0)
2355 printf("Port %d cannot be set back "
2356 "to stopped\n", pi);
2357 printf("Failed to configure port %d hairpin "
2359 /* try to reconfigure queues next time */
2360 port->need_reconfig_queues = 1;
2363 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2364 hairpin_conf.peers[0].port = peer_tx_port;
2365 hairpin_conf.peers[0].queue = i + nb_txq;
2366 hairpin_conf.manual_bind = !!manual;
2367 hairpin_conf.tx_explicit = !!tx_exp;
2368 diag = rte_eth_rx_hairpin_queue_setup
2369 (pi, qi, nb_rxd, &hairpin_conf);
2374 /* Failed to set up the Rx hairpin queue, return */
2375 if (rte_atomic16_cmpset(&(port->port_status),
2377 RTE_PORT_STOPPED) == 0)
2378 printf("Port %d cannot be set back "
2379 "to stopped\n", pi);
2380 printf("Failed to configure port %d hairpin "
2382 /* try to reconfigure queues next time */
2383 port->need_reconfig_queues = 1;
2389 /* Configure the Rx with optional split. */
2391 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2392 uint16_t nb_rx_desc, unsigned int socket_id,
2393 struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2395 union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2396 unsigned int i, mp_n;
2399 if (rx_pkt_nb_segs <= 1 ||
2400 (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2401 rx_conf->rx_seg = NULL;
2402 rx_conf->rx_nseg = 0;
2403 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2404 nb_rx_desc, socket_id,
2408 for (i = 0; i < rx_pkt_nb_segs; i++) {
2409 struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2410 struct rte_mempool *mpx;
2412 * Use the last valid pool for segments whose index
2413 * exceeds the number of configured pools.
2415 mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2416 mpx = mbuf_pool_find(socket_id, mp_n);
2417 /* Handle zero as mbuf data buffer size. */
2418 rx_seg->length = rx_pkt_seg_lengths[i] ?
2419 rx_pkt_seg_lengths[i] :
2420 mbuf_data_size[mp_n];
2421 rx_seg->offset = i < rx_pkt_nb_offs ?
2422 rx_pkt_seg_offsets[i] : 0;
2423 rx_seg->mp = mpx ? mpx : mp;
2425 rx_conf->rx_nseg = rx_pkt_nb_segs;
2426 rx_conf->rx_seg = rx_useg;
2427 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2428 socket_id, rx_conf, NULL);
2429 rx_conf->rx_seg = NULL;
2430 rx_conf->rx_nseg = 0;
2435 start_port(portid_t pid)
2437 int diag, need_check_link_status = -1;
2439 portid_t p_pi = RTE_MAX_ETHPORTS;
2440 portid_t pl[RTE_MAX_ETHPORTS];
2441 portid_t peer_pl[RTE_MAX_ETHPORTS];
2442 uint16_t cnt_pi = 0;
2443 uint16_t cfg_pi = 0;
2446 struct rte_port *port;
2447 struct rte_ether_addr mac_addr;
2448 struct rte_eth_hairpin_cap cap;
2450 if (port_id_is_invalid(pid, ENABLED_WARN))
2455 RTE_ETH_FOREACH_DEV(pi) {
2456 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2459 need_check_link_status = 0;
2461 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2462 RTE_PORT_HANDLING) == 0) {
2463 printf("Port %d is now not stopped\n", pi);
2467 if (port->need_reconfig > 0) {
2468 port->need_reconfig = 0;
2470 if (flow_isolate_all) {
2471 int ret = port_flow_isolate(pi, 1);
2473 printf("Failed to apply isolated"
2474 " mode on port %d\n", pi);
2478 configure_rxtx_dump_callbacks(0);
2479 printf("Configuring Port %d (socket %u)\n", pi,
2481 if (nb_hairpinq > 0 &&
2482 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2483 printf("Port %d doesn't support hairpin "
2487 /* configure port */
2488 diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
2489 nb_txq + nb_hairpinq,
2492 if (rte_atomic16_cmpset(&(port->port_status),
2493 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2494 printf("Port %d can not be set back "
2495 "to stopped\n", pi);
2496 printf("Fail to configure port %d\n", pi);
2497 /* try to reconfigure port next time */
2498 port->need_reconfig = 1;
2502 if (port->need_reconfig_queues > 0) {
2503 port->need_reconfig_queues = 0;
2504 /* setup tx queues */
2505 for (qi = 0; qi < nb_txq; qi++) {
2506 if ((numa_support) &&
2507 (txring_numa[pi] != NUMA_NO_CONFIG))
2508 diag = rte_eth_tx_queue_setup(pi, qi,
2509 port->nb_tx_desc[qi],
2511 &(port->tx_conf[qi]));
2513 diag = rte_eth_tx_queue_setup(pi, qi,
2514 port->nb_tx_desc[qi],
2516 &(port->tx_conf[qi]));
2521 /* Failed to set up Tx queue; revert port status and return. */
2522 if (rte_atomic16_cmpset(&(port->port_status),
2524 RTE_PORT_STOPPED) == 0)
2525 printf("Port %d can not be set back "
2526 "to stopped\n", pi);
2527 printf("Fail to configure port %d tx queues\n",
2529 /* try to reconfigure queues next time */
2530 port->need_reconfig_queues = 1;
2533 for (qi = 0; qi < nb_rxq; qi++) {
2534 /* setup rx queues */
2535 if ((numa_support) &&
2536 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2537 struct rte_mempool *mp =
2539 (rxring_numa[pi], 0);
2541 printf("Failed to setup RX queue:"
2542 "No mempool allocation"
2543 " on the socket %d\n",
2548 diag = rx_queue_setup(pi, qi,
2549 port->nb_rx_desc[qi],
2551 &(port->rx_conf[qi]),
2554 struct rte_mempool *mp =
2556 (port->socket_id, 0);
2558 printf("Failed to setup RX queue:"
2559 "No mempool allocation"
2560 " on the socket %d\n",
2564 diag = rx_queue_setup(pi, qi,
2565 port->nb_rx_desc[qi],
2567 &(port->rx_conf[qi]),
2573 /* Failed to set up Rx queue; revert port status and return. */
2574 if (rte_atomic16_cmpset(&(port->port_status),
2576 RTE_PORT_STOPPED) == 0)
2577 printf("Port %d can not be set back "
2578 "to stopped\n", pi);
2579 printf("Fail to configure port %d rx queues\n",
2581 /* try to reconfigure queues next time */
2582 port->need_reconfig_queues = 1;
2585 /* setup hairpin queues */
2586 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
2589 configure_rxtx_dump_callbacks(verbose_level);
2591 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2595 "Port %d: Failed to disable Ptype parsing\n",
2603 if (rte_eth_dev_start(pi) < 0) {
2604 printf("Fail to start port %d\n", pi);
2606 /* Failed to start the port; revert port status and return. */
2607 if (rte_atomic16_cmpset(&(port->port_status),
2608 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2609 printf("Port %d can not be set back to "
2614 if (rte_atomic16_cmpset(&(port->port_status),
2615 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2616 printf("Port %d can not be set into started\n", pi);
2618 if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2619 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2620 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2621 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2622 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2624 /* At least one port was started; the link status must be checked. */
2625 need_check_link_status = 1;
2630 if (need_check_link_status == 1 && !no_link_check)
2631 check_all_ports_link_status(RTE_PORT_ALL);
2632 else if (need_check_link_status == 0)
2633 printf("Please stop the ports first\n");
2635 if (hairpin_mode & 0xf) {
2639 /* bind all started hairpin ports */
2640 for (i = 0; i < cfg_pi; i++) {
2642 /* bind current Tx to all peer Rx */
2643 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2644 RTE_MAX_ETHPORTS, 1);
2647 for (j = 0; j < peer_pi; j++) {
2648 if (!port_is_started(peer_pl[j]))
2650 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2652 printf("Error during binding hairpin"
2653 " Tx port %u to %u: %s\n",
2655 rte_strerror(-diag));
2659 /* bind all peer Tx to current Rx */
2660 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2661 RTE_MAX_ETHPORTS, 0);
2664 for (j = 0; j < peer_pi; j++) {
2665 if (!port_is_started(peer_pl[j]))
2667 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
2669 printf("Error during binding hairpin"
2670 " Tx port %u to %u: %s\n",
2672 rte_strerror(-diag));
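/*
 * [Editor's example] Condensed, the per-port bring-up that start_port()
 * performs is configure -> queue setup -> start, followed by
 * rte_eth_hairpin_bind() when manual hairpin binding is requested. A
 * minimal sketch with an illustrative function name, one queue pair,
 * and no NUMA or error-recovery handling:
 */
#if 0
static int
bring_up_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = { 0 };

	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0 ||
	    rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				   NULL, mb_pool) != 0 ||
	    rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
				   NULL) != 0)
		return -1;
	return rte_eth_dev_start(port_id);
}
#endif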
2684 stop_port(portid_t pid)
2687 struct rte_port *port;
2688 int need_check_link_status = 0;
2689 portid_t peer_pl[RTE_MAX_ETHPORTS];
2697 if (port_id_is_invalid(pid, ENABLED_WARN))
2700 printf("Stopping ports...\n");
2702 RTE_ETH_FOREACH_DEV(pi) {
2703 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2706 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2707 printf("Please remove port %d from forwarding configuration.\n", pi);
2711 if (port_is_bonding_slave(pi)) {
2712 printf("Please remove port %d from bonded device.\n", pi);
2717 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2718 RTE_PORT_HANDLING) == 0)
2721 if (hairpin_mode & 0xf) {
2724 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
2725 /* unbind all peer Tx from current Rx */
2726 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2727 RTE_MAX_ETHPORTS, 0);
2730 for (j = 0; j < peer_pi; j++) {
2731 if (!port_is_started(peer_pl[j]))
2733 rte_eth_hairpin_unbind(peer_pl[j], pi);
2737 if (rte_eth_dev_stop(pi) != 0)
2738 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
2741 if (rte_atomic16_cmpset(&(port->port_status),
2742 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2743 printf("Port %d can not be set into stopped\n", pi);
2744 need_check_link_status = 1;
2746 if (need_check_link_status && !no_link_check)
2747 check_all_ports_link_status(RTE_PORT_ALL);
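/*
 * [Editor's example] Both start_port() and stop_port() guard the
 * per-port state machine with rte_atomic16_cmpset(): a transition only
 * happens if the port is still in the expected state, so two concurrent
 * commands cannot both claim the same port. A reduced sketch of the
 * idiom, operating on an illustrative state word rather than
 * port->port_status:
 */
#if 0
static void
claim_and_start(volatile uint16_t *state)
{
	/* Claim the port: STOPPED -> HANDLING, or bail out. */
	if (rte_atomic16_cmpset(state, RTE_PORT_STOPPED,
				RTE_PORT_HANDLING) == 0)
		return;	/* another command already owns this port */
	/* ... configure and start the port here ... */
	/* Publish the result: HANDLING -> STARTED. */
	if (rte_atomic16_cmpset(state, RTE_PORT_HANDLING,
				RTE_PORT_STARTED) == 0)
		printf("unexpected port state change\n");
}
#endif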
2753 remove_invalid_ports_in(portid_t *array, portid_t *total)
2756 portid_t new_total = 0;
2758 for (i = 0; i < *total; i++)
2759 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2760 array[new_total] = array[i];
2767 remove_invalid_ports(void)
2769 remove_invalid_ports_in(ports_ids, &nb_ports);
2770 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2771 nb_cfg_ports = nb_fwd_ports;
2775 close_port(portid_t pid)
2778 struct rte_port *port;
2780 if (port_id_is_invalid(pid, ENABLED_WARN))
2783 printf("Closing ports...\n");
2785 RTE_ETH_FOREACH_DEV(pi) {
2786 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2789 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2790 printf("Please remove port %d from forwarding configuration.\n", pi);
2794 if (port_is_bonding_slave(pi)) {
2795 printf("Please remove port %d from bonded device.\n", pi);
2800 if (rte_atomic16_cmpset(&(port->port_status),
2801 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2802 printf("Port %d is already closed\n", pi);
2806 port_flow_flush(pi);
2807 rte_eth_dev_close(pi);
2810 remove_invalid_ports();
2815 reset_port(portid_t pid)
2819 struct rte_port *port;
2821 if (port_id_is_invalid(pid, ENABLED_WARN))
2824 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2825 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2826 printf("Can not reset port(s), please stop port(s) first.\n");
2830 printf("Resetting ports...\n");
2832 RTE_ETH_FOREACH_DEV(pi) {
2833 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2836 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2837 printf("Please remove port %d from forwarding "
2838 "configuration.\n", pi);
2842 if (port_is_bonding_slave(pi)) {
2843 printf("Please remove port %d from bonded device.\n",
2848 diag = rte_eth_dev_reset(pi);
2851 port->need_reconfig = 1;
2852 port->need_reconfig_queues = 1;
2854 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2862 attach_port(char *identifier)
2865 struct rte_dev_iterator iterator;
2867 printf("Attaching a new port...\n");
2869 if (identifier == NULL) {
2870 printf("Invalid parameters are specified\n");
2874 if (rte_dev_probe(identifier) < 0) {
2875 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2879 /* first attach mode: event */
2880 if (setup_on_probe_event) {
2881 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2882 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2883 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2884 ports[pi].need_setup != 0)
2885 setup_attached_port(pi);
2889 /* second attach mode: iterator */
2890 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2891 /* setup ports matching the devargs used for probing */
2892 if (port_is_forwarding(pi))
2893 continue; /* port was already attached before */
2894 setup_attached_port(pi);
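/*
 * [Editor's example] The two attach modes above boil down to: probe the
 * devargs, then either wait for RTE_ETH_EVENT_NEW or iterate the ports
 * the probe created. A minimal sketch of the iterator mode; the
 * function name is illustrative, and a PCI address such as
 * "0000:03:00.0" would be a typical devargs string.
 */
#if 0
static void
attach_and_list(const char *devargs)
{
	struct rte_dev_iterator it;
	portid_t id;

	if (rte_dev_probe(devargs) < 0)
		return;
	RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, &it)
		printf("devargs %s matched port %u\n", devargs, id);
}
#endif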
2899 setup_attached_port(portid_t pi)
2901 unsigned int socket_id;
2904 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2905 /* if socket_id is invalid, set to the first available socket. */
2906 if (check_socket_id(socket_id) < 0)
2907 socket_id = socket_ids[0];
2908 reconfig(pi, socket_id);
2909 ret = rte_eth_promiscuous_enable(pi);
2911 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
2912 pi, rte_strerror(-ret));
2914 ports_ids[nb_ports++] = pi;
2915 fwd_ports_ids[nb_fwd_ports++] = pi;
2916 nb_cfg_ports = nb_fwd_ports;
2917 ports[pi].need_setup = 0;
2918 ports[pi].port_status = RTE_PORT_STOPPED;
2920 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2925 detach_device(struct rte_device *dev)
2930 printf("Device already removed\n");
2934 printf("Removing a device...\n");
2936 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2937 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2938 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
2939 printf("Port %u not stopped\n", sibling);
2942 port_flow_flush(sibling);
2946 if (rte_dev_remove(dev) < 0) {
2947 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2950 remove_invalid_ports();
2952 printf("Device is detached\n");
2953 printf("Now total ports is %d\n", nb_ports);
2959 detach_port_device(portid_t port_id)
2961 if (port_id_is_invalid(port_id, ENABLED_WARN))
2964 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2965 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2966 printf("Port not stopped\n");
2969 printf("Port was not closed\n");
2972 detach_device(rte_eth_devices[port_id].device);
2976 detach_devargs(char *identifier)
2978 struct rte_dev_iterator iterator;
2979 struct rte_devargs da;
2982 printf("Removing a device...\n");
2984 memset(&da, 0, sizeof(da));
2985 if (rte_devargs_parsef(&da, "%s", identifier)) {
2986 printf("cannot parse identifier\n");
2992 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
2993 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2994 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2995 printf("Port %u not stopped\n", port_id);
2996 rte_eth_iterator_cleanup(&iterator);
2999 port_flow_flush(port_id);
3003 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3004 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3005 da.name, da.bus->name);
3009 remove_invalid_ports();
3011 printf("Device %s is detached\n", identifier);
3012 printf("Now total ports is %d\n", nb_ports);
3024 stop_packet_forwarding();
3026 for (i = 0; i < RTE_DIM(mempools); i++) {
3028 if (mp_alloc_type == MP_ALLOC_ANON)
3029 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3033 if (ports != NULL) {
3035 RTE_ETH_FOREACH_DEV(pt_id) {
3036 printf("\nStopping port %d...\n", pt_id);
3040 RTE_ETH_FOREACH_DEV(pt_id) {
3041 printf("\nShutting down port %d...\n", pt_id);
3048 ret = rte_dev_event_monitor_stop();
3051 "fail to stop device event monitor.");
3055 ret = rte_dev_event_callback_unregister(NULL,
3056 dev_event_callback, NULL);
3059 "fail to unregister device event callback.\n");
3063 ret = rte_dev_hotplug_handle_disable();
3066 "fail to disable hotplug handling.\n");
3070 for (i = 0; i < RTE_DIM(mempools); i++) {
3072 rte_mempool_free(mempools[i]);
3075 printf("\nBye...\n");
3078 typedef void (*cmd_func_t)(void);
3079 struct pmd_test_command {
3080 const char *cmd_name;
3081 cmd_func_t cmd_func;
3084 /* Check the link status of all ports for up to 9 s, and print the final result. */
3086 check_all_ports_link_status(uint32_t port_mask)
3088 #define CHECK_INTERVAL 100 /* 100ms */
3089 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3091 uint8_t count, all_ports_up, print_flag = 0;
3092 struct rte_eth_link link;
3094 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3096 printf("Checking link statuses...\n");
3098 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3100 RTE_ETH_FOREACH_DEV(portid) {
3101 if ((port_mask & (1 << portid)) == 0)
3103 memset(&link, 0, sizeof(link));
3104 ret = rte_eth_link_get_nowait(portid, &link);
3107 if (print_flag == 1)
3108 printf("Port %u link get failed: %s\n",
3109 portid, rte_strerror(-ret));
3112 /* print link status if flag set */
3113 if (print_flag == 1) {
3114 rte_eth_link_to_str(link_status,
3115 sizeof(link_status), &link);
3116 printf("Port %d %s\n", portid, link_status);
3119 /* clear all_ports_up flag if any link down */
3120 if (link.link_status == ETH_LINK_DOWN) {
3125 /* After the final print of all link statuses, get out. */
3126 if (print_flag == 1)
3129 if (all_ports_up == 0) {
3131 rte_delay_ms(CHECK_INTERVAL);
3134 /* set the print_flag if all ports up or timeout */
3135 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
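/*
 * [Editor's example] The same polling pattern for a single port: query
 * the link without blocking, retry for up to 9 s, then print the
 * textual status. The function name is illustrative; CHECK_INTERVAL
 * and MAX_CHECK_TIME are the defines introduced above.
 */
#if 0
static void
wait_for_link(portid_t port_id)
{
	char buf[RTE_ETH_LINK_MAX_STR_LEN];
	struct rte_eth_link link;
	uint8_t n;

	memset(&link, 0, sizeof(link));
	for (n = 0; n <= MAX_CHECK_TIME; n++) {
		if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
		    link.link_status == ETH_LINK_UP)
			break;
		rte_delay_ms(CHECK_INTERVAL);
	}
	rte_eth_link_to_str(buf, sizeof(buf), &link);
	printf("Port %u %s\n", port_id, buf);
}
#endif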
3145 rmv_port_callback(void *arg)
3147 int need_to_start = 0;
3148 int org_no_link_check = no_link_check;
3149 portid_t port_id = (intptr_t)arg;
3150 struct rte_device *dev;
3152 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3154 if (!test_done && port_is_forwarding(port_id)) {
3156 stop_packet_forwarding();
3160 no_link_check = org_no_link_check;
3162 /* Save rte_device pointer before closing ethdev port */
3163 dev = rte_eth_devices[port_id].device;
3164 close_port(port_id);
3165 detach_device(dev); /* might be already removed or have more ports */
3168 start_packet_forwarding(0);
3171 /* This function is used by the interrupt thread */
3173 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3176 RTE_SET_USED(param);
3177 RTE_SET_USED(ret_param);
3179 if (type >= RTE_ETH_EVENT_MAX) {
3180 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3181 port_id, __func__, type);
3183 } else if (event_print_mask & (UINT32_C(1) << type)) {
3184 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3185 eth_event_desc[type]);
3190 case RTE_ETH_EVENT_NEW:
3191 ports[port_id].need_setup = 1;
3192 ports[port_id].port_status = RTE_PORT_HANDLING;
3194 case RTE_ETH_EVENT_INTR_RMV:
3195 if (port_id_is_invalid(port_id, DISABLED_WARN))
3197 if (rte_eal_alarm_set(100000,
3198 rmv_port_callback, (void *)(intptr_t)port_id))
3199 fprintf(stderr, "Could not set up deferred device removal\n");
3201 case RTE_ETH_EVENT_DESTROY:
3202 ports[port_id].port_status = RTE_PORT_CLOSED;
3203 printf("Port %u is closed\n", port_id);
3212 register_eth_event_callback(void)
3215 enum rte_eth_event_type event;
3217 for (event = RTE_ETH_EVENT_UNKNOWN;
3218 event < RTE_ETH_EVENT_MAX; event++) {
3219 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3224 TESTPMD_LOG(ERR, "Failed to register callback for "
3225 "%s event\n", eth_event_desc[event]);
3233 /* This function is used by the interrupt thread */
3235 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3236 __rte_unused void *arg)
3241 if (type >= RTE_DEV_EVENT_MAX) {
3242 fprintf(stderr, "%s called upon invalid event %d\n",
3248 case RTE_DEV_EVENT_REMOVE:
3249 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3251 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3253 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
3258 * Because the user's callback is invoked from within the EAL
3259 * interrupt callback, the interrupt callback must finish before
3260 * it can be unregistered when the device is detached. So finish
3261 * the callback quickly and use a deferred removal to detach the
3262 * device instead. This is a workaround; once device detaching
3263 * moves into the EAL, the deferred removal could
3266 if (rte_eal_alarm_set(100000,
3267 rmv_port_callback, (void *)(intptr_t)port_id))
3269 "Could not set up deferred device removal\n");
3271 case RTE_DEV_EVENT_ADD:
3272 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3274 /* TODO: after the kernel driver binding is finished,
3275 * begin to attach the port.
3284 rxtx_port_config(struct rte_port *port)
3289 for (qid = 0; qid < nb_rxq; qid++) {
3290 offloads = port->rx_conf[qid].offloads;
3291 port->rx_conf[qid] = port->dev_info.default_rxconf;
3293 port->rx_conf[qid].offloads = offloads;
3295 /* Check if any Rx parameters have been passed */
3296 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3297 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3299 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3300 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3302 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3303 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3305 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3306 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3308 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3309 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3311 port->nb_rx_desc[qid] = nb_rxd;
3314 for (qid = 0; qid < nb_txq; qid++) {
3315 offloads = port->tx_conf[qid].offloads;
3316 port->tx_conf[qid] = port->dev_info.default_txconf;
3318 port->tx_conf[qid].offloads = offloads;
3320 /* Check if any Tx parameters have been passed */
3321 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3322 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3324 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3325 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3327 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3328 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3330 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3331 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3333 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3334 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3336 port->nb_tx_desc[qid] = nb_txd;
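/*
 * [Editor's example] The pattern above is "start from the PMD's
 * preferred defaults, then override only what the user set". The same
 * idea in isolation, for one Rx queue, with an illustrative function
 * name and threshold value:
 */
#if 0
static int
setup_rxq_with_defaults(portid_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info info;
	struct rte_eth_rxconf rxconf;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return -1;
	rxconf = info.default_rxconf;		/* PMD defaults */
	rxconf.rx_free_thresh = 32;		/* user override */
	return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				      &rxconf, mb_pool);
}
#endif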
3341 init_port_config(void)
3344 struct rte_port *port;
3347 RTE_ETH_FOREACH_DEV(pid) {
3349 port->dev_conf.fdir_conf = fdir_conf;
3351 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3356 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3357 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3358 rss_hf & port->dev_info.flow_type_rss_offloads;
3360 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3361 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3364 if (port->dcb_flag == 0) {
3365 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3366 port->dev_conf.rxmode.mq_mode =
3367 (enum rte_eth_rx_mq_mode)
3368 (rx_mq_mode & ETH_MQ_RX_RSS);
3370 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3373 rxtx_port_config(port);
3375 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3379 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3380 rte_pmd_ixgbe_bypass_init(pid);
3383 if (lsc_interrupt &&
3384 (rte_eth_devices[pid].data->dev_flags &
3385 RTE_ETH_DEV_INTR_LSC))
3386 port->dev_conf.intr_conf.lsc = 1;
3387 if (rmv_interrupt &&
3388 (rte_eth_devices[pid].data->dev_flags &
3389 RTE_ETH_DEV_INTR_RMV))
3390 port->dev_conf.intr_conf.rmv = 1;
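/*
 * [Editor's example] The RSS setup above masks the requested hash types
 * against what the device reports it can do. The same guard in minimal
 * form; the function name is illustrative and ETH_RSS_IP stands in for
 * the configured rss_hf value.
 */
#if 0
static int
configure_rss(portid_t port_id, uint16_t nb_rx, uint16_t nb_tx)
{
	struct rte_eth_dev_info info;
	struct rte_eth_conf conf = { 0 };

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return -1;
	conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	conf.rx_adv_conf.rss_conf.rss_key = NULL;	/* driver default key */
	conf.rx_adv_conf.rss_conf.rss_hf =
		ETH_RSS_IP & info.flow_type_rss_offloads;
	return rte_eth_dev_configure(port_id, nb_rx, nb_tx, &conf);
}
#endif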
3394 void set_port_slave_flag(portid_t slave_pid)
3396 struct rte_port *port;
3398 port = &ports[slave_pid];
3399 port->slave_flag = 1;
3402 void clear_port_slave_flag(portid_t slave_pid)
3404 struct rte_port *port;
3406 port = &ports[slave_pid];
3407 port->slave_flag = 0;
3410 uint8_t port_is_bonding_slave(portid_t slave_pid)
3412 struct rte_port *port;
3414 port = &ports[slave_pid];
3415 if ((rte_eth_devices[slave_pid].data->dev_flags &
3416 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3421 const uint16_t vlan_tags[] = {
3422 0, 1, 2, 3, 4, 5, 6, 7,
3423 8, 9, 10, 11, 12, 13, 14, 15,
3424 16, 17, 18, 19, 20, 21, 22, 23,
3425 24, 25, 26, 27, 28, 29, 30, 31
3429 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3430 enum dcb_mode_enable dcb_mode,
3431 enum rte_eth_nb_tcs num_tcs,
3436 struct rte_eth_rss_conf rss_conf;
3439 * Build the configuration for DCB+VT based on the vlan_tags array
3440 * given above and on the number of traffic classes available for use.
3442 if (dcb_mode == DCB_VT_ENABLED) {
3443 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3444 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3445 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3446 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3448 /* VMDQ+DCB RX and TX configurations */
3449 vmdq_rx_conf->enable_default_pool = 0;
3450 vmdq_rx_conf->default_pool = 0;
3451 vmdq_rx_conf->nb_queue_pools =
3452 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3453 vmdq_tx_conf->nb_queue_pools =
3454 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3456 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3457 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3458 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3459 vmdq_rx_conf->pool_map[i].pools =
3460 1 << (i % vmdq_rx_conf->nb_queue_pools);
3462 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3463 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3464 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3467 /* Set the DCB multi-queue mode for both Rx and Tx. */
3468 eth_conf->rxmode.mq_mode =
3469 (enum rte_eth_rx_mq_mode)
3470 (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3471 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3473 struct rte_eth_dcb_rx_conf *rx_conf =
3474 &eth_conf->rx_adv_conf.dcb_rx_conf;
3475 struct rte_eth_dcb_tx_conf *tx_conf =
3476 &eth_conf->tx_adv_conf.dcb_tx_conf;
3478 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3480 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3484 rx_conf->nb_tcs = num_tcs;
3485 tx_conf->nb_tcs = num_tcs;
3487 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3488 rx_conf->dcb_tc[i] = i % num_tcs;
3489 tx_conf->dcb_tc[i] = i % num_tcs;
3492 eth_conf->rxmode.mq_mode =
3493 (enum rte_eth_rx_mq_mode)
3494 (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3495 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3496 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3500 eth_conf->dcb_capability_en =
3501 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3503 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
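/*
 * [Editor's example] In the VT case above, VLAN i is mapped to pool
 * (i % nb_queue_pools) and "pools" is a one-bit mask selecting that
 * pool; with 16 pools, vlan_tags[18] therefore lands in pool 2. A tiny
 * sketch that prints the mapping (function name illustrative):
 */
#if 0
static void
show_vlan_pool_mapping(void)
{
	unsigned int i, nb_pools = 16;

	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		printf("vlan %u -> pool %u (mask 0x%x)\n",
		       (unsigned int)vlan_tags[i], i % nb_pools,
		       1u << (i % nb_pools));
}
#endif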
3509 init_port_dcb_config(portid_t pid,
3510 enum dcb_mode_enable dcb_mode,
3511 enum rte_eth_nb_tcs num_tcs,
3514 struct rte_eth_conf port_conf;
3515 struct rte_port *rte_port;
3519 rte_port = &ports[pid];
3521 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3522 /* Enter DCB configuration status */
3525 port_conf.rxmode = rte_port->dev_conf.rxmode;
3526 port_conf.txmode = rte_port->dev_conf.txmode;
3528 /* Set the DCB configuration for both VT and non-VT modes. */
3529 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3532 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3534 /* Reconfigure the device. */
3535 retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
3539 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3543 /* If dev_info.vmdq_pool_base is greater than 0,
3544 * the queue ids of the VMDq pools start after the PF queues.
3546 if (dcb_mode == DCB_VT_ENABLED &&
3547 rte_port->dev_info.vmdq_pool_base > 0) {
3548 printf("VMDQ_DCB multi-queue mode is nonsensical"
3549 " for port %d.", pid);
3553 /* Assume all ports in testpmd have the same DCB capability
3554 * and the same number of Rx and Tx queues in DCB mode.
3556 if (dcb_mode == DCB_VT_ENABLED) {
3557 if (rte_port->dev_info.max_vfs > 0) {
3558 nb_rxq = rte_port->dev_info.nb_rx_queues;
3559 nb_txq = rte_port->dev_info.nb_tx_queues;
3561 nb_rxq = rte_port->dev_info.max_rx_queues;
3562 nb_txq = rte_port->dev_info.max_tx_queues;
3565 /* If VT is disabled, use all PF queues. */
3566 if (rte_port->dev_info.vmdq_pool_base == 0) {
3567 nb_rxq = rte_port->dev_info.max_rx_queues;
3568 nb_txq = rte_port->dev_info.max_tx_queues;
3570 nb_rxq = (queueid_t)num_tcs;
3571 nb_txq = (queueid_t)num_tcs;
3575 rx_free_thresh = 64;
3577 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3579 rxtx_port_config(rte_port);
3581 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3582 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3583 rx_vft_set(pid, vlan_tags[i], 1);
3585 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3589 rte_port->dcb_flag = 1;
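/*
 * [Editor's example] A typical invocation of the function above, as
 * issued from the DCB configuration command: 4 traffic classes, VT
 * disabled, priority flow control on. The wrapper name and the port id
 * are illustrative assumptions.
 */
#if 0
static void
enable_dcb_on_port0(void)
{
	if (init_port_dcb_config(0, DCB_ENABLED, ETH_4_TCS, 1) != 0)
		printf("DCB setup failed\n");
}
#endif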
3599 /* Configuration of Ethernet ports. */
3600 ports = rte_zmalloc("testpmd: ports",
3601 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3602 RTE_CACHE_LINE_SIZE);
3603 if (ports == NULL) {
3604 rte_exit(EXIT_FAILURE,
3605 "rte_zmalloc(%d struct rte_port) failed\n",
3608 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3609 LIST_INIT(&ports[i].flow_tunnel_list);
3610 /* Initialize ports NUMA structures */
3611 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3612 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3613 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3627 const char clr[] = { 27, '[', '2', 'J', '\0' };
3628 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3630 /* Clear screen and move to top left */
3631 printf("%s%s", clr, top_left);
3633 printf("\nPort statistics ====================================");
3634 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3635 nic_stats_display(fwd_ports_ids[i]);
3641 signal_handler(int signum)
3643 if (signum == SIGINT || signum == SIGTERM) {
3644 printf("\nSignal %d received, preparing to exit...\n",
3646 #ifdef RTE_LIB_PDUMP
3647 /* uninitialize packet capture framework */
3650 #ifdef RTE_LIB_LATENCYSTATS
3651 if (latencystats_enabled != 0)
3652 rte_latencystats_uninit();
3655 /* Set flag to indicate forced termination. */
3657 /* exit with the expected status */
3658 signal(signum, SIG_DFL);
3659 kill(getpid(), signum);
3664 main(int argc, char** argv)
3671 signal(SIGINT, signal_handler);
3672 signal(SIGTERM, signal_handler);
3674 testpmd_logtype = rte_log_register("testpmd");
3675 if (testpmd_logtype < 0)
3676 rte_exit(EXIT_FAILURE, "Cannot register log type");
3677 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3679 diag = rte_eal_init(argc, argv);
3681 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3682 rte_strerror(rte_errno));
3684 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3685 rte_exit(EXIT_FAILURE,
3686 "Secondary process type not supported.\n");
3688 ret = register_eth_event_callback();
3690 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3692 #ifdef RTE_LIB_PDUMP
3693 /* initialize packet capture framework */
3698 RTE_ETH_FOREACH_DEV(port_id) {
3699 ports_ids[count] = port_id;
3702 nb_ports = (portid_t) count;
3704 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3706 /* allocate port structures, and init them */
3709 set_def_fwd_config();
3711 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3712 "Check the core mask argument\n");
3714 /* Bitrate/latency stats disabled by default */
3715 #ifdef RTE_LIB_BITRATESTATS
3716 bitrate_enabled = 0;
3718 #ifdef RTE_LIB_LATENCYSTATS
3719 latencystats_enabled = 0;
3722 /* on FreeBSD, mlockall() is disabled by default */
3723 #ifdef RTE_EXEC_ENV_FREEBSD
3732 launch_args_parse(argc, argv);
3734 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3735 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3739 if (tx_first && interactive)
3740 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3741 "interactive mode.\n");
3743 if (tx_first && lsc_interrupt) {
3744 printf("Warning: lsc_interrupt needs to be off when "
3745 " using tx_first. Disabling.\n");
3749 if (!nb_rxq && !nb_txq)
3750 printf("Warning: Either rx or tx queues should be non-zero\n");
3752 if (nb_rxq > 1 && nb_rxq > nb_txq)
3753 printf("Warning: nb_rxq=%d enables RSS configuration, "
3754 "but nb_txq=%d will prevent to fully test it.\n",
3760 ret = rte_dev_hotplug_handle_enable();
3763 "fail to enable hotplug handling.");
3767 ret = rte_dev_event_monitor_start();
3770 "fail to start device event monitoring.");
3774 ret = rte_dev_event_callback_register(NULL,
3775 dev_event_callback, NULL);
3778 "fail to register device event callback\n");
3783 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3784 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3786 /* set all ports to promiscuous mode by default */
3787 RTE_ETH_FOREACH_DEV(port_id) {
3788 ret = rte_eth_promiscuous_enable(port_id);
3790 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3791 port_id, rte_strerror(-ret));
3794 /* Init metrics library */
3795 rte_metrics_init(rte_socket_id());
3797 #ifdef RTE_LIB_LATENCYSTATS
3798 if (latencystats_enabled != 0) {
3799 int ret = rte_latencystats_init(1, NULL);
3801 printf("Warning: latencystats init()"
3802 " returned error %d\n", ret);
3803 printf("Latencystats running on lcore %d\n",
3804 latencystats_lcore_id);
3808 /* Setup bitrate stats */
3809 #ifdef RTE_LIB_BITRATESTATS
3810 if (bitrate_enabled != 0) {
3811 bitrate_data = rte_stats_bitrate_create();
3812 if (bitrate_data == NULL)
3813 rte_exit(EXIT_FAILURE,
3814 "Could not allocate bitrate data.\n");
3815 rte_stats_bitrate_reg(bitrate_data);
3819 #ifdef RTE_LIB_CMDLINE
3820 if (strlen(cmdline_filename) != 0)
3821 cmdline_read_from_file(cmdline_filename);
3823 if (interactive == 1) {
3825 printf("Start automatic packet forwarding\n");
3826 start_packet_forwarding(0);
3838 printf("No commandline core given, start packet forwarding\n");
3839 start_packet_forwarding(tx_first);
3840 if (stats_period != 0) {
3841 uint64_t prev_time = 0, cur_time, diff_time = 0;
3842 uint64_t timer_period;
3844 /* Convert to number of cycles */
3845 timer_period = stats_period * rte_get_timer_hz();
3847 while (f_quit == 0) {
3848 cur_time = rte_get_timer_cycles();
3849 diff_time += cur_time - prev_time;
3851 if (diff_time >= timer_period) {
3853 /* Reset the timer */
3856 /* Sleep to avoid unnecessary checks */
3857 prev_time = cur_time;
3862 printf("Press enter to exit\n");
3863 rc = read(0, &c, 1);
3869 ret = rte_eal_cleanup();
3871 rte_exit(EXIT_FAILURE,
3872 "EAL cleanup failed: %s\n", strerror(-ret));
3874 return EXIT_SUCCESS;
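/*
 * [Editor's example] The statistics loop in main() converts the period
 * from seconds to TSC cycles once with rte_get_timer_hz() and then
 * compares cycle deltas. The same idiom in isolation, bounded to three
 * ticks; the function name and 2 s period are illustrative.
 */
#if 0
static void
tick_demo(void)
{
	uint64_t period = 2 * rte_get_timer_hz();	/* 2 s in cycles */
	uint64_t prev = rte_get_timer_cycles();
	int ticks = 0;

	while (ticks < 3) {
		uint64_t now = rte_get_timer_cycles();

		if (now - prev >= period) {
			printf("tick\n");	/* e.g. print port stats */
			prev = now;
			ticks++;
		}
	}
}
#endif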