1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
12 #ifndef RTE_EXEC_ENV_WINDOWS
15 #include <sys/types.h>
19 #include <sys/queue.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_byteorder.h>
30 #include <rte_debug.h>
31 #include <rte_cycles.h>
32 #include <rte_memory.h>
33 #include <rte_memcpy.h>
34 #include <rte_launch.h>
36 #include <rte_alarm.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_atomic.h>
40 #include <rte_branch_prediction.h>
41 #include <rte_mempool.h>
42 #include <rte_malloc.h>
44 #include <rte_mbuf_pool_ops.h>
45 #include <rte_interrupts.h>
47 #include <rte_ether.h>
48 #include <rte_ethdev.h>
50 #include <rte_string_fns.h>
52 #include <rte_pmd_ixgbe.h>
55 #include <rte_pdump.h>
58 #include <rte_metrics.h>
59 #ifdef RTE_LIB_BITRATESTATS
60 #include <rte_bitrate.h>
62 #ifdef RTE_LIB_LATENCYSTATS
63 #include <rte_latencystats.h>
65 #ifdef RTE_EXEC_ENV_WINDOWS
72 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
73 #define HUGE_FLAG (0x40000)
75 #define HUGE_FLAG MAP_HUGETLB
78 #ifndef MAP_HUGE_SHIFT
79 /* older kernels (or FreeBSD) will not have this define */
80 #define HUGE_SHIFT (26)
82 #define HUGE_SHIFT MAP_HUGE_SHIFT
85 #define EXTMEM_HEAP_NAME "extmem"
86 #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
88 uint16_t verbose_level = 0; /**< Silent by default. */
89 int testpmd_logtype; /**< Log type for testpmd logs */
91 /* use the main core for the command line? */
92 uint8_t interactive = 0;
93 uint8_t auto_start = 0;
95 char cmdline_filename[PATH_MAX] = {0};
98 * NUMA support configuration.
99 * When set, the NUMA support attempts to dispatch the allocation of the
100 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
101 * probed ports among the CPU sockets 0 and 1.
102 * Otherwise, all memory is allocated from CPU socket 0.
104 uint8_t numa_support = 1; /**< numa enabled by default */
107 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
110 uint8_t socket_num = UMA_NO_CONFIG;
113 * Select mempool allocation type:
114 * - native: use regular DPDK memory
115 * - anon: use regular DPDK memory to create mempool, but populate using
116 * anonymous memory (may not be IOVA-contiguous)
117 * - xmem: use externally allocated hugepage memory
119 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
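/*
 * Editorial sketch: the allocation type is normally selected on the testpmd
 * command line (the --mp-alloc option) and drives the switch in
 * mbuf_pool_create() further down, roughly:
 *
 *   switch (mp_alloc_type) {
 *   case MP_ALLOC_NATIVE: rte_mp = rte_pktmbuf_pool_create(...); break;
 *   case MP_ALLOC_ANON:   rte_mempool_populate_anon(rte_mp); break;
 *   ...
 *   }
 */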
122 * Store the specified sockets on which the memory pools used by the ports are allocated.
125 uint8_t port_numa[RTE_MAX_ETHPORTS];
128 * Store the specified sockets on which the RX rings used by the ports are allocated.
131 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
134 * Store the specified sockets on which the TX rings used by the ports are allocated.
137 uint8_t txring_numa[RTE_MAX_ETHPORTS];
140 * Record the Ethernet address of peer target ports to which packets are
142 * Must be instantiated with the Ethernet addresses of peer traffic generator
145 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
146 portid_t nb_peer_eth_addrs = 0;
149 * Probed Target Environment.
151 struct rte_port *ports; /**< For all probed ethernet ports. */
152 portid_t nb_ports; /**< Number of probed ethernet ports. */
153 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
154 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
156 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
159 * Test Forwarding Configuration.
160 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
161 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
163 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
164 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
165 portid_t nb_cfg_ports; /**< Number of configured ports. */
166 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
168 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
169 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
171 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
172 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
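/*
 * Worked example (editorial): with 2 probed ports and nb_rxq = nb_txq = 4,
 * nb_fwd_streams = 2 * 4 = 8, i.e. one stream per (port, queue) pair;
 * init_fwd_streams() below actually sizes it as nb_ports * max(nb_rxq, nb_txq).
 */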
175 * Forwarding engines.
177 struct fwd_engine * fwd_engines[] = {
187 &five_tuple_swap_fwd_engine,
188 #ifdef RTE_LIBRTE_IEEE1588
189 &ieee1588_fwd_engine,
194 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
195 uint16_t mempool_flags;
197 struct fwd_config cur_fwd_config;
198 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
199 uint32_t retry_enabled;
200 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
201 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
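/*
 * Editorial sketch of the retry behaviour these two values control (this is
 * the usual pattern in the testpmd forwarding engines, not code from this
 * file): when a Tx burst is only partially accepted, the lcore waits
 * burst_tx_delay_time microseconds and retries, up to burst_tx_retry_num
 * times:
 *
 *   retry = 0;
 *   while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
 *           rte_delay_us(burst_tx_delay_time);
 *           nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
 *                           &pkts_burst[nb_tx], nb_rx - nb_tx);
 *   }
 */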
203 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
204 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
205 DEFAULT_MBUF_DATA_SIZE
206 }; /**< Mbuf data space size. */
207 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
208 * specified on command-line. */
209 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
212 * In a container it is not possible to terminate a process running with the
213 * 'stats-period' option, so set a flag to exit the stats-period loop once SIGINT/SIGTERM is received.
218 * Configuration of packet segments used to scatter received packets
219 * if any of the split features is configured.
221 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
222 uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
223 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
224 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
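/*
 * Illustrative example (editorial): with rx_pkt_nb_segs = 2 and
 * rx_pkt_seg_lengths = {64, 0}, a split-capable Rx queue places the first
 * 64 bytes of each packet in a segment from pool 0 and the rest in a
 * segment from pool 1; a zero length falls back to that pool's
 * mbuf_data_size (see rx_queue_setup() below).
 */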
227 * Configuration of packet segments used by the "txonly" processing engine.
229 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
230 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
231 TXONLY_DEF_PACKET_LEN,
233 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
235 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
236 /**< Split policy for packets to TX. */
238 uint8_t txonly_multi_flow;
239 /**< Whether multiple flows are generated in TXONLY mode. */
241 uint32_t tx_pkt_times_inter;
242 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
244 uint32_t tx_pkt_times_intra;
245 /**< Timings for send scheduling in TXONLY mode, time between packets. */
247 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
248 uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
249 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
251 /* Whether the current configuration is in DCB mode; 0 means it is not. */
252 uint8_t dcb_config = 0;
255 * Configurable number of RX/TX queues.
257 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
258 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
259 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
262 * Configurable number of RX/TX ring descriptors.
263 * Defaults are supplied by drivers via ethdev.
265 #define RTE_TEST_RX_DESC_DEFAULT 0
266 #define RTE_TEST_TX_DESC_DEFAULT 0
267 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
268 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
270 #define RTE_PMD_PARAM_UNSET -1
272 * Configurable values of RX and TX ring threshold registers.
275 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
276 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
277 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
279 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
280 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
281 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
284 * Configurable value of RX free threshold.
286 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
289 * Configurable value of RX drop enable.
291 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
294 * Configurable value of TX free threshold.
296 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
299 * Configurable value of TX RS bit threshold.
301 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
304 * Configurable value of buffered packets before sending.
306 uint16_t noisy_tx_sw_bufsz;
309 * Configurable value of packet buffer timeout.
311 uint16_t noisy_tx_sw_buf_flush_time;
314 * Configurable value for size of VNF internal memory area
315 * used for simulating noisy neighbour behaviour
317 uint64_t noisy_lkup_mem_sz;
320 * Configurable value of number of random writes done in
321 * VNF simulation memory area.
323 uint64_t noisy_lkup_num_writes;
326 * Configurable value of number of random reads done in
327 * VNF simulation memory area.
329 uint64_t noisy_lkup_num_reads;
332 * Configurable value of number of random reads/writes done in
333 * VNF simulation memory area.
335 uint64_t noisy_lkup_num_reads_writes;
338 * Receive Side Scaling (RSS) configuration.
340 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
343 * Port topology configuration
345 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
348 * Avoid flushing all the RX streams before forwarding starts.
350 uint8_t no_flush_rx = 0; /* flush by default */
353 * Flow API isolated mode.
355 uint8_t flow_isolate_all;
358 * Avoid checking the link status when starting/stopping a port.
360 uint8_t no_link_check = 0; /* check by default */
363 * Don't automatically start all ports in interactive mode.
365 uint8_t no_device_start = 0;
368 * Enable link status change notification
370 uint8_t lsc_interrupt = 1; /* enabled by default */
373 * Enable device removal notification.
375 uint8_t rmv_interrupt = 1; /* enabled by default */
377 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
379 /* After attach, port setup is called on event or by iterator */
380 bool setup_on_probe_event = true;
382 /* Clear ptypes on port initialization. */
383 uint8_t clear_ptypes = true;
385 /* Hairpin ports configuration mode. */
386 uint16_t hairpin_mode;
388 /* Pretty printing of ethdev events */
389 static const char * const eth_event_desc[] = {
390 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
391 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
392 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
393 [RTE_ETH_EVENT_INTR_RESET] = "reset",
394 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
395 [RTE_ETH_EVENT_IPSEC] = "IPsec",
396 [RTE_ETH_EVENT_MACSEC] = "MACsec",
397 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
398 [RTE_ETH_EVENT_NEW] = "device probed",
399 [RTE_ETH_EVENT_DESTROY] = "device released",
400 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
401 [RTE_ETH_EVENT_MAX] = NULL,
405 * Display or mask Ethernet device events.
406 * Defaults to all events except VF_MBOX.
408 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
409 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
410 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
411 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
412 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
413 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
414 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
415 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
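/*
 * Editorial sketch: events are reported only when their bit is set in the
 * mask; the event callback is expected to do something like
 *
 *   if (event_print_mask & (UINT32_C(1) << type))
 *           printf("Port %u: %s event\n", port_id, eth_event_desc[type]);
 *
 * RTE_ETH_EVENT_VF_MBOX is deliberately absent from the default mask.
 */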
417 * Decide if all memory is locked for performance.
422 * NIC bypass mode configuration options.
425 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
426 /* The NIC bypass watchdog timeout. */
427 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
431 #ifdef RTE_LIB_LATENCYSTATS
434 * Set when latency stats are enabled on the command line.
436 uint8_t latencystats_enabled;
439 * Lcore ID to serve latency statistics.
441 lcoreid_t latencystats_lcore_id = -1;
446 * Ethernet device configuration.
448 struct rte_eth_rxmode rx_mode = {
449 /* Default maximum frame length.
450 * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead"
456 struct rte_eth_txmode tx_mode = {
457 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
460 struct rte_fdir_conf fdir_conf = {
461 .mode = RTE_FDIR_MODE_NONE,
462 .pballoc = RTE_FDIR_PBALLOC_64K,
463 .status = RTE_FDIR_REPORT_STATUS,
465 .vlan_tci_mask = 0xFFEF,
467 .src_ip = 0xFFFFFFFF,
468 .dst_ip = 0xFFFFFFFF,
471 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
472 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
474 .src_port_mask = 0xFFFF,
475 .dst_port_mask = 0xFFFF,
476 .mac_addr_byte_mask = 0xFF,
477 .tunnel_type_mask = 1,
478 .tunnel_id_mask = 0xFFFFFFFF,
483 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
486 * Display zero values by default for xstats
488 uint8_t xstats_hide_zero;
491 * Measurement of CPU cycles is disabled by default.
493 uint8_t record_core_cycles;
496 * Display of RX and TX bursts disabled by default
498 uint8_t record_burst_stats;
500 unsigned int num_sockets = 0;
501 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
503 #ifdef RTE_LIB_BITRATESTATS
504 /* Bitrate statistics */
505 struct rte_stats_bitrates *bitrate_data;
506 lcoreid_t bitrate_lcore_id;
507 uint8_t bitrate_enabled;
510 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
511 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
514 * Hexadecimal bitmask of the RX multi-queue modes that can be enabled.
516 enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
519 * Used to set forced link speed
521 uint32_t eth_link_speed;
523 /* Forward function declarations */
524 static void setup_attached_port(portid_t pi);
525 static void check_all_ports_link_status(uint32_t port_mask);
526 static int eth_event_callback(portid_t port_id,
527 enum rte_eth_event_type type,
528 void *param, void *ret_param);
529 static void dev_event_callback(const char *device_name,
530 enum rte_dev_event_type type,
534 * Check if all the ports are started.
535 * If yes, return positive value. If not, return zero.
537 static int all_ports_started(void);
539 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
540 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
542 /* Holds the registered mbuf dynamic flags names. */
543 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
546 * Helper function to check if a socket has not been discovered yet.
547 * Return a positive value if the socket is new, zero if it is already known.
550 new_socket_id(unsigned int socket_id)
554 for (i = 0; i < num_sockets; i++) {
555 if (socket_ids[i] == socket_id)
562 * Setup default configuration.
565 set_default_fwd_lcores_config(void)
569 unsigned int sock_num;
572 for (i = 0; i < RTE_MAX_LCORE; i++) {
573 if (!rte_lcore_is_enabled(i))
575 sock_num = rte_lcore_to_socket_id(i);
576 if (new_socket_id(sock_num)) {
577 if (num_sockets >= RTE_MAX_NUMA_NODES) {
578 rte_exit(EXIT_FAILURE,
579 "Total sockets greater than %u\n",
582 socket_ids[num_sockets++] = sock_num;
584 if (i == rte_get_main_lcore())
586 fwd_lcores_cpuids[nb_lc++] = i;
588 nb_lcores = (lcoreid_t) nb_lc;
589 nb_cfg_lcores = nb_lcores;
594 set_def_peer_eth_addrs(void)
598 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
599 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
600 peer_eth_addrs[i].addr_bytes[5] = i;
605 set_default_fwd_ports_config(void)
610 RTE_ETH_FOREACH_DEV(pt_id) {
611 fwd_ports_ids[i++] = pt_id;
613 /* Update sockets info according to the attached device */
614 int socket_id = rte_eth_dev_socket_id(pt_id);
615 if (socket_id >= 0 && new_socket_id(socket_id)) {
616 if (num_sockets >= RTE_MAX_NUMA_NODES) {
617 rte_exit(EXIT_FAILURE,
618 "Total sockets greater than %u\n",
621 socket_ids[num_sockets++] = socket_id;
625 nb_cfg_ports = nb_ports;
626 nb_fwd_ports = nb_ports;
630 set_def_fwd_config(void)
632 set_default_fwd_lcores_config();
633 set_def_peer_eth_addrs();
634 set_default_fwd_ports_config();
637 #ifndef RTE_EXEC_ENV_WINDOWS
638 /* extremely pessimistic estimation of memory required to create a mempool */
640 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
642 unsigned int n_pages, mbuf_per_pg, leftover;
643 uint64_t total_mem, mbuf_mem, obj_sz;
645 /* there is no good way to predict how much space the mempool will
646 * occupy because it will allocate chunks on the fly, and some of those
647 * will come from default DPDK memory while some will come from our
648 * external memory, so just assume 128MB will be enough for everyone.
650 uint64_t hdr_mem = 128 << 20;
652 /* account for possible non-contiguousness */
653 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
655 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
659 mbuf_per_pg = pgsz / obj_sz;
660 leftover = (nb_mbufs % mbuf_per_pg) > 0;
661 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
663 mbuf_mem = n_pages * pgsz;
665 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
667 if (total_mem > SIZE_MAX) {
668 TESTPMD_LOG(ERR, "Memory size too big\n");
671 *out = (size_t)total_mem;
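/*
 * Worked example (editorial, illustrative numbers): for nb_mbufs = 100000
 * and an effective object size of ~2 KiB on 2 MiB pages, roughly 1024
 * objects fit per page, so ~98 pages (~196 MiB) are needed for the mbufs,
 * plus the fixed 128 MiB header allowance, all rounded up to the page size.
 */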
677 pagesz_flags(uint64_t page_sz)
679 /* as per the mmap() manpage, the flag for a page size is the log2 of that
680 * page size shifted by MAP_HUGE_SHIFT
682 int log2 = rte_log2_u64(page_sz);
684 return (log2 << HUGE_SHIFT);
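/*
 * Worked example (editorial): for 2 MiB pages rte_log2_u64(2M) = 21, so the
 * returned value is 21 << 26 with the usual MAP_HUGE_SHIFT, which mmap()
 * combines with MAP_HUGETLB to request that specific huge page size.
 */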
688 alloc_mem(size_t memsz, size_t pgsz, bool huge)
693 /* allocate anonymous hugepages */
694 flags = MAP_ANONYMOUS | MAP_PRIVATE;
696 flags |= HUGE_FLAG | pagesz_flags(pgsz);
698 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
699 if (addr == MAP_FAILED)
705 struct extmem_param {
709 rte_iova_t *iova_table;
710 unsigned int iova_table_len;
714 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
717 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
718 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
719 unsigned int cur_page, n_pages, pgsz_idx;
720 size_t mem_sz, cur_pgsz;
721 rte_iova_t *iovas = NULL;
725 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
726 /* skip anything that is too big */
727 if (pgsizes[pgsz_idx] > SIZE_MAX)
730 cur_pgsz = pgsizes[pgsz_idx];
732 /* if we were told not to allocate hugepages, override */
734 cur_pgsz = sysconf(_SC_PAGESIZE);
736 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
738 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
742 /* allocate our memory */
743 addr = alloc_mem(mem_sz, cur_pgsz, huge);
745 /* if we couldn't allocate memory with a specified page size,
746 * that doesn't mean we can't do it with other page sizes, so
752 /* store IOVA addresses for every page in this memory area */
753 n_pages = mem_sz / cur_pgsz;
755 iovas = malloc(sizeof(*iovas) * n_pages);
758 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
761 /* lock memory if it's not huge pages */
765 /* populate IOVA addresses */
766 for (cur_page = 0; cur_page < n_pages; cur_page++) {
771 offset = cur_pgsz * cur_page;
772 cur = RTE_PTR_ADD(addr, offset);
774 /* touch the page before getting its IOVA */
775 *(volatile char *)cur = 0;
777 iova = rte_mem_virt2iova(cur);
779 iovas[cur_page] = iova;
784 /* if we couldn't allocate anything */
790 param->pgsz = cur_pgsz;
791 param->iova_table = iovas;
792 param->iova_table_len = n_pages;
799 munmap(addr, mem_sz);
805 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
807 struct extmem_param param;
810 memset(&param, 0, sizeof(param));
812 /* check if our heap exists */
813 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
815 /* create our heap */
816 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
818 TESTPMD_LOG(ERR, "Cannot create heap\n");
823 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
825 TESTPMD_LOG(ERR, "Cannot create memory area\n");
829 /* we now have a valid memory area, so add it to heap */
830 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
831 param.addr, param.len, param.iova_table,
832 param.iova_table_len, param.pgsz);
834 /* when using VFIO, memory is automatically mapped for DMA by EAL */
836 /* not needed any more */
837 free(param.iova_table);
840 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
841 munmap(param.addr, param.len);
847 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
853 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
854 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
859 RTE_ETH_FOREACH_DEV(pid) {
860 struct rte_eth_dev *dev =
861 &rte_eth_devices[pid];
863 ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
867 "unable to DMA unmap addr 0x%p "
869 memhdr->addr, dev->data->name);
872 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
875 "unable to un-register addr 0x%p\n", memhdr->addr);
880 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
881 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
884 size_t page_size = sysconf(_SC_PAGESIZE);
887 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
891 "unable to register addr 0x%p\n", memhdr->addr);
894 RTE_ETH_FOREACH_DEV(pid) {
895 struct rte_eth_dev *dev =
896 &rte_eth_devices[pid];
898 ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
902 "unable to DMA map addr 0x%p "
904 memhdr->addr, dev->data->name);
911 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
912 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
914 struct rte_pktmbuf_extmem *xmem;
915 unsigned int ext_num, zone_num, elt_num;
918 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
919 elt_num = EXTBUF_ZONE_SIZE / elt_size;
920 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
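/*
 * Worked example (editorial, assuming the default 2176-byte mbuf segment):
 * elt_size stays 2176 (already cache-line aligned), elt_num = 2 MiB / 2176
 * = 963 buffers per zone, so 100000 mbufs need zone_num = 104 memzones.
 */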
922 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
924 TESTPMD_LOG(ERR, "Cannot allocate memory for "
925 "external buffer descriptors\n");
929 for (ext_num = 0; ext_num < zone_num; ext_num++) {
930 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
931 const struct rte_memzone *mz;
932 char mz_name[RTE_MEMZONE_NAMESIZE];
935 ret = snprintf(mz_name, sizeof(mz_name),
936 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
937 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
938 errno = ENAMETOOLONG;
942 mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
944 RTE_MEMZONE_IOVA_CONTIG |
946 RTE_MEMZONE_SIZE_HINT_ONLY,
950 * The caller exits on external buffer creation
951 * error, so there is no need to free memzones.
957 xseg->buf_ptr = mz->addr;
958 xseg->buf_iova = mz->iova;
959 xseg->buf_len = EXTBUF_ZONE_SIZE;
960 xseg->elt_size = elt_size;
962 if (ext_num == 0 && xmem != NULL) {
971 * Configuration initialisation done once at init time.
973 static struct rte_mempool *
974 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
975 unsigned int socket_id, uint16_t size_idx)
977 char pool_name[RTE_MEMPOOL_NAMESIZE];
978 struct rte_mempool *rte_mp = NULL;
979 #ifndef RTE_EXEC_ENV_WINDOWS
982 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
984 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
987 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
988 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
990 switch (mp_alloc_type) {
991 case MP_ALLOC_NATIVE:
993 /* wrapper to rte_mempool_create() */
994 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
995 rte_mbuf_best_mempool_ops());
996 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
997 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1000 #ifndef RTE_EXEC_ENV_WINDOWS
1003 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1004 mb_size, (unsigned int) mb_mempool_cache,
1005 sizeof(struct rte_pktmbuf_pool_private),
1006 socket_id, mempool_flags);
1010 if (rte_mempool_populate_anon(rte_mp) == 0) {
1011 rte_mempool_free(rte_mp);
1015 rte_pktmbuf_pool_init(rte_mp, NULL);
1016 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1017 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1021 case MP_ALLOC_XMEM_HUGE:
1024 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1026 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1027 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1030 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1031 if (heap_socket < 0)
1032 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1034 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1035 rte_mbuf_best_mempool_ops());
1036 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1037 mb_mempool_cache, 0, mbuf_seg_size,
1044 struct rte_pktmbuf_extmem *ext_mem;
1045 unsigned int ext_num;
1047 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1048 socket_id, pool_name, &ext_mem);
1050 rte_exit(EXIT_FAILURE,
1051 "Can't create pinned data buffers\n");
1053 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1054 rte_mbuf_best_mempool_ops());
1055 rte_mp = rte_pktmbuf_pool_create_extbuf
1056 (pool_name, nb_mbuf, mb_mempool_cache,
1057 0, mbuf_seg_size, socket_id,
1064 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1068 #ifndef RTE_EXEC_ENV_WINDOWS
1071 if (rte_mp == NULL) {
1072 rte_exit(EXIT_FAILURE,
1073 "Creation of mbuf pool for socket %u failed: %s\n",
1074 socket_id, rte_strerror(rte_errno));
1075 } else if (verbose_level > 0) {
1076 rte_mempool_dump(stdout, rte_mp);
1082 * Check whether the given socket id is valid in NUMA mode;
1083 * if valid, return 0, else return -1.
1086 check_socket_id(const unsigned int socket_id)
1088 static int warning_once = 0;
1090 if (new_socket_id(socket_id)) {
1091 if (!warning_once && numa_support)
1092 printf("Warning: NUMA should be configured manually by"
1093 " using --port-numa-config and"
1094 " --ring-numa-config parameters along with"
1103 * Get the allowed maximum number of RX queues.
1104 * *pid returns the port id which has the minimal value of
1105 * max_rx_queues among all ports.
1108 get_allowed_max_nb_rxq(portid_t *pid)
1110 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1111 bool max_rxq_valid = false;
1113 struct rte_eth_dev_info dev_info;
1115 RTE_ETH_FOREACH_DEV(pi) {
1116 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1119 max_rxq_valid = true;
1120 if (dev_info.max_rx_queues < allowed_max_rxq) {
1121 allowed_max_rxq = dev_info.max_rx_queues;
1125 return max_rxq_valid ? allowed_max_rxq : 0;
1129 * Check whether the input rxq is valid.
1130 * If the input rxq is not greater than the maximum number
1131 * of RX queues of any port, it is valid.
1132 * If valid, return 0, else return -1.
1135 check_nb_rxq(queueid_t rxq)
1137 queueid_t allowed_max_rxq;
1140 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1141 if (rxq > allowed_max_rxq) {
1142 printf("Fail: input rxq (%u) can't be greater "
1143 "than max_rx_queues (%u) of port %u\n",
1153 * Get the allowed maximum number of TX queues.
1154 * *pid returns the port id which has the minimal value of
1155 * max_tx_queues among all ports.
1158 get_allowed_max_nb_txq(portid_t *pid)
1160 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1161 bool max_txq_valid = false;
1163 struct rte_eth_dev_info dev_info;
1165 RTE_ETH_FOREACH_DEV(pi) {
1166 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1169 max_txq_valid = true;
1170 if (dev_info.max_tx_queues < allowed_max_txq) {
1171 allowed_max_txq = dev_info.max_tx_queues;
1175 return max_txq_valid ? allowed_max_txq : 0;
1179 * Check whether the input txq is valid.
1180 * If the input txq is not greater than the maximum number
1181 * of TX queues of any port, it is valid.
1182 * If valid, return 0, else return -1.
1185 check_nb_txq(queueid_t txq)
1187 queueid_t allowed_max_txq;
1190 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1191 if (txq > allowed_max_txq) {
1192 printf("Fail: input txq (%u) can't be greater "
1193 "than max_tx_queues (%u) of port %u\n",
1203 * Get the allowed maximum number of RXDs of every rx queue.
1204 * *pid returns the port id which has the minimal value of
1205 * max_rxd among all queues of all ports.
1208 get_allowed_max_nb_rxd(portid_t *pid)
1210 uint16_t allowed_max_rxd = UINT16_MAX;
1212 struct rte_eth_dev_info dev_info;
1214 RTE_ETH_FOREACH_DEV(pi) {
1215 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1218 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1219 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1223 return allowed_max_rxd;
1227 * Get the allowed minimal number of RXDs of every rx queue.
1228 * *pid returns the port id which has the highest value of
1229 * min_rxd among all queues of all ports.
1232 get_allowed_min_nb_rxd(portid_t *pid)
1234 uint16_t allowed_min_rxd = 0;
1236 struct rte_eth_dev_info dev_info;
1238 RTE_ETH_FOREACH_DEV(pi) {
1239 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1242 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1243 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1248 return allowed_min_rxd;
1252 * Check whether the input rxd is valid.
1253 * If the input rxd is not greater than the maximum number
1254 * of RXDs supported by any Rx queue and is not less than the
1255 * minimal number of RXDs required by any Rx queue, it is valid.
1256 * If valid, return 0, else return -1.
1259 check_nb_rxd(queueid_t rxd)
1261 uint16_t allowed_max_rxd;
1262 uint16_t allowed_min_rxd;
1265 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1266 if (rxd > allowed_max_rxd) {
1267 printf("Fail: input rxd (%u) can't be greater "
1268 "than max_rxds (%u) of port %u\n",
1275 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1276 if (rxd < allowed_min_rxd) {
1277 printf("Fail: input rxd (%u) can't be less "
1278 "than min_rxds (%u) of port %u\n",
1289 * Get the allowed maximum number of TXDs of every tx queue.
1290 * *pid returns the port id which has the minimal value of
1291 * max_txd among all tx queues.
1294 get_allowed_max_nb_txd(portid_t *pid)
1296 uint16_t allowed_max_txd = UINT16_MAX;
1298 struct rte_eth_dev_info dev_info;
1300 RTE_ETH_FOREACH_DEV(pi) {
1301 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1304 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1305 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1309 return allowed_max_txd;
1313 * Get the allowed minimal number of TXDs of every tx queue.
1314 * *pid returns the port id which has the highest value of
1315 * min_txd among all tx queues.
1318 get_allowed_min_nb_txd(portid_t *pid)
1320 uint16_t allowed_min_txd = 0;
1322 struct rte_eth_dev_info dev_info;
1324 RTE_ETH_FOREACH_DEV(pi) {
1325 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1328 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1329 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1334 return allowed_min_txd;
1338 * Check whether the input txd is valid.
1339 * If the input txd is not greater than the maximum number
1340 * of TXDs supported by any Tx queue, it is valid.
1341 * If valid, return 0, else return -1.
1344 check_nb_txd(queueid_t txd)
1346 uint16_t allowed_max_txd;
1347 uint16_t allowed_min_txd;
1350 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1351 if (txd > allowed_max_txd) {
1352 printf("Fail: input txd (%u) can't be greater "
1353 "than max_txds (%u) of port %u\n",
1360 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1361 if (txd < allowed_min_txd) {
1362 printf("Fail: input txd (%u) can't be less "
1363 "than min_txds (%u) of port %u\n",
1374 * Get the allowed maximum number of hairpin queues.
1375 * *pid returns the port id which has the minimal value of
1376 * max_hairpin_queues among all ports.
1379 get_allowed_max_nb_hairpinq(portid_t *pid)
1381 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1383 struct rte_eth_hairpin_cap cap;
1385 RTE_ETH_FOREACH_DEV(pi) {
1386 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1390 if (cap.max_nb_queues < allowed_max_hairpinq) {
1391 allowed_max_hairpinq = cap.max_nb_queues;
1395 return allowed_max_hairpinq;
1399 * Check whether the input number of hairpin queues is valid.
1400 * If it is not greater than the maximum number
1401 * of hairpin queues of any port, it is valid.
1402 * If valid, return 0, else return -1.
1405 check_nb_hairpinq(queueid_t hairpinq)
1407 queueid_t allowed_max_hairpinq;
1410 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1411 if (hairpinq > allowed_max_hairpinq) {
1412 printf("Fail: input hairpin (%u) can't be greater "
1413 "than max_hairpin_queues (%u) of port %u\n",
1414 hairpinq, allowed_max_hairpinq, pid);
1421 init_config_port_offloads(portid_t pid, uint32_t socket_id)
1423 struct rte_port *port = &ports[pid];
1428 port->dev_conf.txmode = tx_mode;
1429 port->dev_conf.rxmode = rx_mode;
1431 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1433 rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1435 ret = update_jumbo_frame_offload(pid);
1437 printf("Updating jumbo frame offload failed for port %u\n",
1440 if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1441 port->dev_conf.txmode.offloads &=
1442 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1444 /* Apply Rx offloads configuration */
1445 for (i = 0; i < port->dev_info.max_rx_queues; i++)
1446 port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
1447 /* Apply Tx offloads configuration */
1448 for (i = 0; i < port->dev_info.max_tx_queues; i++)
1449 port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
1452 port->dev_conf.link_speeds = eth_link_speed;
1454 /* set flag to initialize port/queue */
1455 port->need_reconfig = 1;
1456 port->need_reconfig_queues = 1;
1457 port->socket_id = socket_id;
1458 port->tx_metadata = 0;
1461 * Check for the maximum number of segments per MTU.
1462 * Update the mbuf data size accordingly.
1464 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1465 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1466 data_size = rx_mode.max_rx_pkt_len /
1467 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1469 if ((data_size + RTE_PKTMBUF_HEADROOM) > mbuf_data_size[0]) {
1470 mbuf_data_size[0] = data_size + RTE_PKTMBUF_HEADROOM;
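/*
 * Worked example (editorial, assuming the usual 128-byte
 * RTE_PKTMBUF_HEADROOM): with max_rx_pkt_len = 9000 and
 * nb_mtu_seg_max = 3, data_size = 3000 and 3000 + 128 = 3128, which
 * exceeds the default 2176-byte first segment, so mbuf_data_size[0]
 * becomes 3128.
 */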
1471 TESTPMD_LOG(WARNING,
1472 "Configured mbuf size of the first segment %hu\n",
1482 struct rte_mempool *mbp;
1483 unsigned int nb_mbuf_per_pool;
1485 struct rte_gro_param gro_param;
1488 /* Configuration of logical cores. */
1489 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1490 sizeof(struct fwd_lcore *) * nb_lcores,
1491 RTE_CACHE_LINE_SIZE);
1492 if (fwd_lcores == NULL) {
1493 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1494 "failed\n", nb_lcores);
1496 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1497 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1498 sizeof(struct fwd_lcore),
1499 RTE_CACHE_LINE_SIZE);
1500 if (fwd_lcores[lc_id] == NULL) {
1501 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1504 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1507 RTE_ETH_FOREACH_DEV(pid) {
1511 socket_id = port_numa[pid];
1512 if (port_numa[pid] == NUMA_NO_CONFIG) {
1513 socket_id = rte_eth_dev_socket_id(pid);
1516 * if socket_id is invalid,
1517 * set to the first available socket.
1519 if (check_socket_id(socket_id) < 0)
1520 socket_id = socket_ids[0];
1523 socket_id = (socket_num == UMA_NO_CONFIG) ?
1526 /* Apply default TxRx configuration for all ports */
1527 init_config_port_offloads(pid, socket_id);
1530 * Create pools of mbuf.
1531 * If NUMA support is disabled, create a single pool of mbuf in
1532 * socket 0 memory by default.
1533 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1535 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1536 * nb_txd can be configured at run time.
1538 if (param_total_num_mbufs)
1539 nb_mbuf_per_pool = param_total_num_mbufs;
1541 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1542 (nb_lcores * mb_mempool_cache) +
1543 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1544 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
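/*
 * Worked example (editorial, assuming the usual testpmd defaults
 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048, MAX_PKT_BURST = 512
 * and mb_mempool_cache = 250): with 4 lcores this is
 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, then multiplied by
 * RTE_MAX_ETHPORTS.
 */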
1550 for (i = 0; i < num_sockets; i++)
1551 for (j = 0; j < mbuf_data_size_n; j++)
1552 mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1553 mbuf_pool_create(mbuf_data_size[j],
1559 for (i = 0; i < mbuf_data_size_n; i++)
1560 mempools[i] = mbuf_pool_create
1563 socket_num == UMA_NO_CONFIG ?
1569 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1570 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1572 * Record which mbuf pool each logical core should use, if needed.
1574 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1575 mbp = mbuf_pool_find(
1576 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1579 mbp = mbuf_pool_find(0, 0);
1580 fwd_lcores[lc_id]->mbp = mbp;
1581 /* initialize GSO context */
1582 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1583 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1584 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1585 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1587 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1592 /* create a gro context for each lcore */
1593 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1594 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1595 gro_param.max_item_per_flow = MAX_PKT_BURST;
1596 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1597 gro_param.socket_id = rte_lcore_to_socket_id(
1598 fwd_lcores_cpuids[lc_id]);
1599 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1600 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1601 rte_exit(EXIT_FAILURE,
1602 "rte_gro_ctx_create() failed\n");
1609 reconfig(portid_t new_port_id, unsigned socket_id)
1611 /* Reconfiguration of Ethernet ports. */
1612 init_config_port_offloads(new_port_id, socket_id);
1618 init_fwd_streams(void)
1621 struct rte_port *port;
1622 streamid_t sm_id, nb_fwd_streams_new;
1625 /* set the socket id according to whether NUMA support is enabled */
1626 RTE_ETH_FOREACH_DEV(pid) {
1628 if (nb_rxq > port->dev_info.max_rx_queues) {
1629 printf("Fail: nb_rxq(%d) is greater than "
1630 "max_rx_queues(%d)\n", nb_rxq,
1631 port->dev_info.max_rx_queues);
1634 if (nb_txq > port->dev_info.max_tx_queues) {
1635 printf("Fail: nb_txq(%d) is greater than "
1636 "max_tx_queues(%d)\n", nb_txq,
1637 port->dev_info.max_tx_queues);
1641 if (port_numa[pid] != NUMA_NO_CONFIG)
1642 port->socket_id = port_numa[pid];
1644 port->socket_id = rte_eth_dev_socket_id(pid);
1647 * if socket_id is invalid,
1648 * set to the first available socket.
1650 if (check_socket_id(port->socket_id) < 0)
1651 port->socket_id = socket_ids[0];
1655 if (socket_num == UMA_NO_CONFIG)
1656 port->socket_id = 0;
1658 port->socket_id = socket_num;
1662 q = RTE_MAX(nb_rxq, nb_txq);
1664 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1667 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1668 if (nb_fwd_streams_new == nb_fwd_streams)
1671 if (fwd_streams != NULL) {
1672 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1673 if (fwd_streams[sm_id] == NULL)
1675 rte_free(fwd_streams[sm_id]);
1676 fwd_streams[sm_id] = NULL;
1678 rte_free(fwd_streams);
1683 nb_fwd_streams = nb_fwd_streams_new;
1684 if (nb_fwd_streams) {
1685 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1686 sizeof(struct fwd_stream *) * nb_fwd_streams,
1687 RTE_CACHE_LINE_SIZE);
1688 if (fwd_streams == NULL)
1689 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1690 " (struct fwd_stream *)) failed\n",
1693 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1694 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1695 " struct fwd_stream", sizeof(struct fwd_stream),
1696 RTE_CACHE_LINE_SIZE);
1697 if (fwd_streams[sm_id] == NULL)
1698 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1699 "(struct fwd_stream) failed\n");
1707 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1709 uint64_t total_burst, sburst;
1711 uint64_t burst_stats[4];
1712 uint16_t pktnb_stats[4];
1714 int burst_percent[4], sburstp;
1718 * First compute the total number of packet bursts and the
1719 * two highest numbers of bursts of the same number of packets.
1721 memset(&burst_stats, 0x0, sizeof(burst_stats));
1722 memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1724 /* Show stats for 0 burst size always */
1725 total_burst = pbs->pkt_burst_spread[0];
1726 burst_stats[0] = pbs->pkt_burst_spread[0];
1729 /* Find the next 2 burst sizes with highest occurrences. */
1730 for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1731 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1736 total_burst += nb_burst;
1738 if (nb_burst > burst_stats[1]) {
1739 burst_stats[2] = burst_stats[1];
1740 pktnb_stats[2] = pktnb_stats[1];
1741 burst_stats[1] = nb_burst;
1742 pktnb_stats[1] = nb_pkt;
1743 } else if (nb_burst > burst_stats[2]) {
1744 burst_stats[2] = nb_burst;
1745 pktnb_stats[2] = nb_pkt;
1748 if (total_burst == 0)
1751 printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1752 for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1754 printf("%d%% of other]\n", 100 - sburstp);
1758 sburst += burst_stats[i];
1759 if (sburst == total_burst) {
1760 printf("%d%% of %d pkts]\n",
1761 100 - sburstp, (int) pktnb_stats[i]);
1766 (double)burst_stats[i] / total_burst * 100;
1767 printf("%d%% of %d pkts + ",
1768 burst_percent[i], (int) pktnb_stats[i]);
1769 sburstp += burst_percent[i];
1774 fwd_stream_stats_display(streamid_t stream_id)
1776 struct fwd_stream *fs;
1777 static const char *fwd_top_stats_border = "-------";
1779 fs = fwd_streams[stream_id];
1780 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1781 (fs->fwd_dropped == 0))
1783 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1784 "TX Port=%2d/Queue=%2d %s\n",
1785 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1786 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1787 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1788 " TX-dropped: %-14"PRIu64,
1789 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1791 /* if checksum mode */
1792 if (cur_fwd_eng == &csum_fwd_engine) {
1793 printf(" RX- bad IP checksum: %-14"PRIu64
1794 " Rx- bad L4 checksum: %-14"PRIu64
1795 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1796 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1797 fs->rx_bad_outer_l4_csum);
1798 printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
1799 fs->rx_bad_outer_ip_csum);
1804 if (record_burst_stats) {
1805 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1806 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1811 fwd_stats_display(void)
1813 static const char *fwd_stats_border = "----------------------";
1814 static const char *acc_stats_border = "+++++++++++++++";
1816 struct fwd_stream *rx_stream;
1817 struct fwd_stream *tx_stream;
1818 uint64_t tx_dropped;
1819 uint64_t rx_bad_ip_csum;
1820 uint64_t rx_bad_l4_csum;
1821 uint64_t rx_bad_outer_l4_csum;
1822 uint64_t rx_bad_outer_ip_csum;
1823 } ports_stats[RTE_MAX_ETHPORTS];
1824 uint64_t total_rx_dropped = 0;
1825 uint64_t total_tx_dropped = 0;
1826 uint64_t total_rx_nombuf = 0;
1827 struct rte_eth_stats stats;
1828 uint64_t fwd_cycles = 0;
1829 uint64_t total_recv = 0;
1830 uint64_t total_xmit = 0;
1831 struct rte_port *port;
1836 memset(ports_stats, 0, sizeof(ports_stats));
1838 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1839 struct fwd_stream *fs = fwd_streams[sm_id];
1841 if (cur_fwd_config.nb_fwd_streams >
1842 cur_fwd_config.nb_fwd_ports) {
1843 fwd_stream_stats_display(sm_id);
1845 ports_stats[fs->tx_port].tx_stream = fs;
1846 ports_stats[fs->rx_port].rx_stream = fs;
1849 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1851 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1852 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1853 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1854 fs->rx_bad_outer_l4_csum;
1855 ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
1856 fs->rx_bad_outer_ip_csum;
1858 if (record_core_cycles)
1859 fwd_cycles += fs->core_cycles;
1861 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1862 pt_id = fwd_ports_ids[i];
1863 port = &ports[pt_id];
1865 rte_eth_stats_get(pt_id, &stats);
1866 stats.ipackets -= port->stats.ipackets;
1867 stats.opackets -= port->stats.opackets;
1868 stats.ibytes -= port->stats.ibytes;
1869 stats.obytes -= port->stats.obytes;
1870 stats.imissed -= port->stats.imissed;
1871 stats.oerrors -= port->stats.oerrors;
1872 stats.rx_nombuf -= port->stats.rx_nombuf;
1874 total_recv += stats.ipackets;
1875 total_xmit += stats.opackets;
1876 total_rx_dropped += stats.imissed;
1877 total_tx_dropped += ports_stats[pt_id].tx_dropped;
1878 total_tx_dropped += stats.oerrors;
1879 total_rx_nombuf += stats.rx_nombuf;
1881 printf("\n %s Forward statistics for port %-2d %s\n",
1882 fwd_stats_border, pt_id, fwd_stats_border);
1884 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
1885 "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
1886 stats.ipackets + stats.imissed);
1888 if (cur_fwd_eng == &csum_fwd_engine) {
1889 printf(" Bad-ipcsum: %-14"PRIu64
1890 " Bad-l4csum: %-14"PRIu64
1891 "Bad-outer-l4csum: %-14"PRIu64"\n",
1892 ports_stats[pt_id].rx_bad_ip_csum,
1893 ports_stats[pt_id].rx_bad_l4_csum,
1894 ports_stats[pt_id].rx_bad_outer_l4_csum);
1895 printf(" Bad-outer-ipcsum: %-14"PRIu64"\n",
1896 ports_stats[pt_id].rx_bad_outer_ip_csum);
1898 if (stats.ierrors + stats.rx_nombuf > 0) {
1899 printf(" RX-error: %-"PRIu64"\n", stats.ierrors);
1900 printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
1903 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
1904 "TX-total: %-"PRIu64"\n",
1905 stats.opackets, ports_stats[pt_id].tx_dropped,
1906 stats.opackets + ports_stats[pt_id].tx_dropped);
1908 if (record_burst_stats) {
1909 if (ports_stats[pt_id].rx_stream)
1910 pkt_burst_stats_display("RX",
1911 &ports_stats[pt_id].rx_stream->rx_burst_stats);
1912 if (ports_stats[pt_id].tx_stream)
1913 pkt_burst_stats_display("TX",
1914 &ports_stats[pt_id].tx_stream->tx_burst_stats);
1917 printf(" %s--------------------------------%s\n",
1918 fwd_stats_border, fwd_stats_border);
1921 printf("\n %s Accumulated forward statistics for all ports"
1923 acc_stats_border, acc_stats_border);
1924 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1926 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1928 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1929 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1930 if (total_rx_nombuf > 0)
1931 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1932 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1934 acc_stats_border, acc_stats_border);
1935 if (record_core_cycles) {
1936 #define CYC_PER_MHZ 1E6
1937 if (total_recv > 0 || total_xmit > 0) {
1938 uint64_t total_pkts = 0;
1939 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
1940 strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
1941 total_pkts = total_xmit;
1943 total_pkts = total_recv;
1945 printf("\n CPU cycles/packet=%.2F (total cycles="
1946 "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
1948 (double) fwd_cycles / total_pkts,
1949 fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
1950 (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
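/*
 * Worked example (editorial): fwd_cycles = 2e9 and total_pkts = 1e8 give
 * 20.00 CPU cycles/packet; on a 2000 MHz TSC that is 10 ns per packet.
 */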
1956 fwd_stats_reset(void)
1962 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1963 pt_id = fwd_ports_ids[i];
1964 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
1966 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1967 struct fwd_stream *fs = fwd_streams[sm_id];
1971 fs->fwd_dropped = 0;
1972 fs->rx_bad_ip_csum = 0;
1973 fs->rx_bad_l4_csum = 0;
1974 fs->rx_bad_outer_l4_csum = 0;
1975 fs->rx_bad_outer_ip_csum = 0;
1977 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
1978 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
1979 fs->core_cycles = 0;
1984 flush_fwd_rx_queues(void)
1986 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1993 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1994 uint64_t timer_period;
1996 /* convert to number of cycles */
1997 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1999 for (j = 0; j < 2; j++) {
2000 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2001 for (rxq = 0; rxq < nb_rxq; rxq++) {
2002 port_id = fwd_ports_ids[rxp];
2004 * testpmd can get stuck in the do-while loop below
2005 * if rte_eth_rx_burst() always returns a nonzero number of
2006 * packets, so a timer is added to exit this loop
2007 * after a 1 second timeout.
2009 prev_tsc = rte_rdtsc();
2011 nb_rx = rte_eth_rx_burst(port_id, rxq,
2012 pkts_burst, MAX_PKT_BURST);
2013 for (i = 0; i < nb_rx; i++)
2014 rte_pktmbuf_free(pkts_burst[i]);
2016 cur_tsc = rte_rdtsc();
2017 diff_tsc = cur_tsc - prev_tsc;
2018 timer_tsc += diff_tsc;
2019 } while ((nb_rx > 0) &&
2020 (timer_tsc < timer_period));
2024 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2029 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2031 struct fwd_stream **fsm;
2034 #ifdef RTE_LIB_BITRATESTATS
2035 uint64_t tics_per_1sec;
2036 uint64_t tics_datum;
2037 uint64_t tics_current;
2038 uint16_t i, cnt_ports;
2040 cnt_ports = nb_ports;
2041 tics_datum = rte_rdtsc();
2042 tics_per_1sec = rte_get_timer_hz();
2044 fsm = &fwd_streams[fc->stream_idx];
2045 nb_fs = fc->stream_nb;
2047 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2048 (*pkt_fwd)(fsm[sm_id]);
2049 #ifdef RTE_LIB_BITRATESTATS
2050 if (bitrate_enabled != 0 &&
2051 bitrate_lcore_id == rte_lcore_id()) {
2052 tics_current = rte_rdtsc();
2053 if (tics_current - tics_datum >= tics_per_1sec) {
2054 /* Periodic bitrate calculation */
2055 for (i = 0; i < cnt_ports; i++)
2056 rte_stats_bitrate_calc(bitrate_data,
2058 tics_datum = tics_current;
2062 #ifdef RTE_LIB_LATENCYSTATS
2063 if (latencystats_enabled != 0 &&
2064 latencystats_lcore_id == rte_lcore_id())
2065 rte_latencystats_update();
2068 } while (! fc->stopped);
2072 start_pkt_forward_on_core(void *fwd_arg)
2074 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2075 cur_fwd_config.fwd_eng->packet_fwd);
2080 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2081 * Used to start communication flows in network loopback test configurations.
2084 run_one_txonly_burst_on_core(void *fwd_arg)
2086 struct fwd_lcore *fwd_lc;
2087 struct fwd_lcore tmp_lcore;
2089 fwd_lc = (struct fwd_lcore *) fwd_arg;
2090 tmp_lcore = *fwd_lc;
2091 tmp_lcore.stopped = 1;
2092 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2097 * Launch packet forwarding:
2098 * - Setup per-port forwarding context.
2099 * - launch logical cores with their forwarding configuration.
2102 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2104 port_fwd_begin_t port_fwd_begin;
2109 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2110 if (port_fwd_begin != NULL) {
2111 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2112 (*port_fwd_begin)(fwd_ports_ids[i]);
2114 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2115 lc_id = fwd_lcores_cpuids[i];
2116 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2117 fwd_lcores[i]->stopped = 0;
2118 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2119 fwd_lcores[i], lc_id);
2121 printf("launch lcore %u failed - diag=%d\n",
2128 * Launch packet forwarding configuration.
2131 start_packet_forwarding(int with_tx_first)
2133 port_fwd_begin_t port_fwd_begin;
2134 port_fwd_end_t port_fwd_end;
2137 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2138 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
2140 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2141 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
2143 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2144 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2145 (!nb_rxq || !nb_txq))
2146 rte_exit(EXIT_FAILURE,
2147 "Either rxq or txq are 0, cannot use %s fwd mode\n",
2148 cur_fwd_eng->fwd_mode_name);
2150 if (all_ports_started() == 0) {
2151 printf("Not all ports were started\n");
2154 if (test_done == 0) {
2155 printf("Packet forwarding already started\n");
2163 flush_fwd_rx_queues();
2165 pkt_fwd_config_display(&cur_fwd_config);
2166 rxtx_config_display();
2169 if (with_tx_first) {
2170 port_fwd_begin = tx_only_engine.port_fwd_begin;
2171 if (port_fwd_begin != NULL) {
2172 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2173 (*port_fwd_begin)(fwd_ports_ids[i]);
2175 while (with_tx_first--) {
2176 launch_packet_forwarding(
2177 run_one_txonly_burst_on_core);
2178 rte_eal_mp_wait_lcore();
2180 port_fwd_end = tx_only_engine.port_fwd_end;
2181 if (port_fwd_end != NULL) {
2182 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2183 (*port_fwd_end)(fwd_ports_ids[i]);
2186 launch_packet_forwarding(start_pkt_forward_on_core);
2190 stop_packet_forwarding(void)
2192 port_fwd_end_t port_fwd_end;
2198 printf("Packet forwarding not started\n");
2201 printf("Telling cores to stop...");
2202 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2203 fwd_lcores[lc_id]->stopped = 1;
2204 printf("\nWaiting for lcores to finish...\n");
2205 rte_eal_mp_wait_lcore();
2206 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2207 if (port_fwd_end != NULL) {
2208 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2209 pt_id = fwd_ports_ids[i];
2210 (*port_fwd_end)(pt_id);
2214 fwd_stats_display();
2216 printf("\nDone.\n");
2221 dev_set_link_up(portid_t pid)
2223 if (rte_eth_dev_set_link_up(pid) < 0)
2224 printf("\nSet link up fail.\n");
2228 dev_set_link_down(portid_t pid)
2230 if (rte_eth_dev_set_link_down(pid) < 0)
2231 printf("\nSet link down fail.\n");
2235 all_ports_started(void)
2238 struct rte_port *port;
2240 RTE_ETH_FOREACH_DEV(pi) {
2242 /* Check if there is a port which is not started */
2243 if ((port->port_status != RTE_PORT_STARTED) &&
2244 (port->slave_flag == 0))
2248 /* All ports are started */
2253 port_is_stopped(portid_t port_id)
2255 struct rte_port *port = &ports[port_id];
2257 if ((port->port_status != RTE_PORT_STOPPED) &&
2258 (port->slave_flag == 0))
2264 all_ports_stopped(void)
2268 RTE_ETH_FOREACH_DEV(pi) {
2269 if (!port_is_stopped(pi))
2277 port_is_started(portid_t port_id)
2279 if (port_id_is_invalid(port_id, ENABLED_WARN))
2282 if (ports[port_id].port_status != RTE_PORT_STARTED)
2288 /* Configure the Rx and Tx hairpin queues for the selected port. */
2290 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2293 struct rte_eth_hairpin_conf hairpin_conf = {
2298 struct rte_port *port = &ports[pi];
2299 uint16_t peer_rx_port = pi;
2300 uint16_t peer_tx_port = pi;
2301 uint32_t manual = 1;
2302 uint32_t tx_exp = hairpin_mode & 0x10;
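/*
 * Editorial note (inferred from the checks below): hairpin_mode is a bit
 * mask where 0x10 requests explicit Tx flow rules, 0x1 chains ports so each
 * port's Tx peers the next port's Rx (the last wrapping to the first),
 * 0x2 binds ports in pairs, and 0 keeps the default self-loop where a port
 * hairpins to itself.
 */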
2304 if (!(hairpin_mode & 0xf)) {
2308 } else if (hairpin_mode & 0x1) {
2309 peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2310 RTE_ETH_DEV_NO_OWNER);
2311 if (peer_tx_port >= RTE_MAX_ETHPORTS)
2312 peer_tx_port = rte_eth_find_next_owned_by(0,
2313 RTE_ETH_DEV_NO_OWNER);
2314 if (p_pi != RTE_MAX_ETHPORTS) {
2315 peer_rx_port = p_pi;
2319 /* Last port will be the peer RX port of the first. */
2320 RTE_ETH_FOREACH_DEV(next_pi)
2321 peer_rx_port = next_pi;
2324 } else if (hairpin_mode & 0x2) {
2326 peer_rx_port = p_pi;
2328 peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2329 RTE_ETH_DEV_NO_OWNER);
2330 if (peer_rx_port >= RTE_MAX_ETHPORTS)
2333 peer_tx_port = peer_rx_port;
2337 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2338 hairpin_conf.peers[0].port = peer_rx_port;
2339 hairpin_conf.peers[0].queue = i + nb_rxq;
2340 hairpin_conf.manual_bind = !!manual;
2341 hairpin_conf.tx_explicit = !!tx_exp;
2342 diag = rte_eth_tx_hairpin_queue_setup
2343 (pi, qi, nb_txd, &hairpin_conf);
2348 /* Failed to set up the Tx hairpin queue, return */
2349 if (rte_atomic16_cmpset(&(port->port_status),
2351 RTE_PORT_STOPPED) == 0)
2352 printf("Port %d can not be set back "
2353 "to stopped\n", pi);
2354 printf("Fail to configure port %d hairpin "
2356 /* try to reconfigure queues next time */
2357 port->need_reconfig_queues = 1;
2360 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2361 hairpin_conf.peers[0].port = peer_tx_port;
2362 hairpin_conf.peers[0].queue = i + nb_txq;
2363 hairpin_conf.manual_bind = !!manual;
2364 hairpin_conf.tx_explicit = !!tx_exp;
2365 diag = rte_eth_rx_hairpin_queue_setup
2366 (pi, qi, nb_rxd, &hairpin_conf);
2371 /* Failed to set up the Rx hairpin queue, return */
2372 if (rte_atomic16_cmpset(&(port->port_status),
2374 RTE_PORT_STOPPED) == 0)
2375 printf("Port %d can not be set back "
2376 "to stopped\n", pi);
2377 printf("Fail to configure port %d hairpin "
2379 /* try to reconfigure queues next time */
2380 port->need_reconfig_queues = 1;
2386 /* Configure the Rx with optional split. */
2388 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2389 uint16_t nb_rx_desc, unsigned int socket_id,
2390 struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2392 union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2393 unsigned int i, mp_n;
2396 if (rx_pkt_nb_segs <= 1 ||
2397 (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2398 rx_conf->rx_seg = NULL;
2399 rx_conf->rx_nseg = 0;
2400 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2401 nb_rx_desc, socket_id,
2405 for (i = 0; i < rx_pkt_nb_segs; i++) {
2406 struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2407 struct rte_mempool *mpx;
2409 * Use the last valid pool for segments whose index
2410 * exceeds the number of configured pools.
2412 mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2413 mpx = mbuf_pool_find(socket_id, mp_n);
2414 /* Handle zero as mbuf data buffer size. */
2415 rx_seg->length = rx_pkt_seg_lengths[i] ?
2416 rx_pkt_seg_lengths[i] :
2417 mbuf_data_size[mp_n];
2418 rx_seg->offset = i < rx_pkt_nb_offs ?
2419 rx_pkt_seg_offsets[i] : 0;
2420 rx_seg->mp = mpx ? mpx : mp;
2422 rx_conf->rx_nseg = rx_pkt_nb_segs;
2423 rx_conf->rx_seg = rx_useg;
2424 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2425 socket_id, rx_conf, NULL);
2426 rx_conf->rx_seg = NULL;
2427 rx_conf->rx_nseg = 0;
2432 start_port(portid_t pid)
2434 int diag, need_check_link_status = -1;
2436 portid_t p_pi = RTE_MAX_ETHPORTS;
2437 portid_t pl[RTE_MAX_ETHPORTS];
2438 portid_t peer_pl[RTE_MAX_ETHPORTS];
2439 uint16_t cnt_pi = 0;
2440 uint16_t cfg_pi = 0;
2443 struct rte_port *port;
2444 struct rte_ether_addr mac_addr;
2445 struct rte_eth_hairpin_cap cap;
2447 if (port_id_is_invalid(pid, ENABLED_WARN))
2450 RTE_ETH_FOREACH_DEV(pi) {
2451 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2454 need_check_link_status = 0;
2456 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2457 RTE_PORT_HANDLING) == 0) {
2458 printf("Port %d is now not stopped\n", pi);
2462 if (port->need_reconfig > 0) {
2463 port->need_reconfig = 0;
2465 if (flow_isolate_all) {
2466 int ret = port_flow_isolate(pi, 1);
2468 printf("Failed to apply isolated"
2469 " mode on port %d\n", pi);
2473 configure_rxtx_dump_callbacks(0);
2474 printf("Configuring Port %d (socket %u)\n", pi,
2476 if (nb_hairpinq > 0 &&
2477 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2478 printf("Port %d doesn't support hairpin "
2482 /* configure port */
2483 diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
2484 nb_txq + nb_hairpinq,
2487 if (rte_atomic16_cmpset(&(port->port_status),
2488 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2489 printf("Port %d can not be set back "
2490 "to stopped\n", pi);
2491 printf("Fail to configure port %d\n", pi);
2492 /* try to reconfigure port next time */
2493 port->need_reconfig = 1;
2497 if (port->need_reconfig_queues > 0) {
2498 port->need_reconfig_queues = 0;
2499 /* setup tx queues */
2500 for (qi = 0; qi < nb_txq; qi++) {
2501 if ((numa_support) &&
2502 (txring_numa[pi] != NUMA_NO_CONFIG))
2503 diag = rte_eth_tx_queue_setup(pi, qi,
2504 port->nb_tx_desc[qi],
2506 &(port->tx_conf[qi]));
2508 diag = rte_eth_tx_queue_setup(pi, qi,
2509 port->nb_tx_desc[qi],
2511 &(port->tx_conf[qi]));
2516 /* Failed to set up the Tx queue, return */
2517 if (rte_atomic16_cmpset(&(port->port_status),
2519 RTE_PORT_STOPPED) == 0)
2520 printf("Port %d can not be set back "
2521 "to stopped\n", pi);
2522 printf("Fail to configure port %d tx queues\n",
2524 /* try to reconfigure queues next time */
2525 port->need_reconfig_queues = 1;
2528 for (qi = 0; qi < nb_rxq; qi++) {
2529 /* setup rx queues */
2530 if ((numa_support) &&
2531 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2532 struct rte_mempool * mp =
2534 (rxring_numa[pi], 0);
2536 printf("Failed to setup RX queue:"
2537 "No mempool allocation"
2538 " on the socket %d\n",
2543 diag = rx_queue_setup(pi, qi,
2544 port->nb_rx_desc[qi],
2546 &(port->rx_conf[qi]),
2549 struct rte_mempool *mp =
2551 (port->socket_id, 0);
2553 printf("Failed to setup RX queue:"
2554 "No mempool allocation"
2555 " on the socket %d\n",
2559 diag = rx_queue_setup(pi, qi,
2560 port->nb_rx_desc[qi],
2562 &(port->rx_conf[qi]),
2568 /* Failed to set up the Rx queue, return */
2569 if (rte_atomic16_cmpset(&(port->port_status),
2571 RTE_PORT_STOPPED) == 0)
2572 printf("Port %d can not be set back "
2573 "to stopped\n", pi);
2574 printf("Fail to configure port %d rx queues\n",
2576 /* try to reconfigure queues next time */
2577 port->need_reconfig_queues = 1;
2580 /* setup hairpin queues */
2581 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
2584 configure_rxtx_dump_callbacks(verbose_level);
2586 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2590 "Port %d: Failed to disable Ptype parsing\n",
2598 diag = rte_eth_dev_start(pi);
2600 printf("Fail to start port %d: %s\n", pi,
2601 rte_strerror(-diag));
2603 /* Failed to start the port, roll the status back and return */
2604 if (rte_atomic16_cmpset(&(port->port_status),
2605 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2606 printf("Port %d can not be set back to "
2611 if (rte_atomic16_cmpset(&(port->port_status),
2612 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2613 printf("Port %d can not be set into started\n", pi);
2615 if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2616 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2617 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2618 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2619 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2621 /* at least one port started, need to check link status */
2622 need_check_link_status = 1;
2627 if (need_check_link_status == 1 && !no_link_check)
2628 check_all_ports_link_status(RTE_PORT_ALL);
2629 else if (need_check_link_status == 0)
2630 printf("Please stop the ports first\n");
2632 if (hairpin_mode & 0xf) {
2636 /* bind all started hairpin ports */
2637 for (i = 0; i < cfg_pi; i++) {
2639 /* bind current Tx to all peer Rx */
2640 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2641 RTE_MAX_ETHPORTS, 1);
2644 for (j = 0; j < peer_pi; j++) {
2645 if (!port_is_started(peer_pl[j]))
2647 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2649 printf("Error during binding hairpin"
2650 " Tx port %u to %u: %s\n",
2652 rte_strerror(-diag));
2656 /* bind all peer Tx to current Rx */
2657 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2658 RTE_MAX_ETHPORTS, 0);
2661 for (j = 0; j < peer_pi; j++) {
2662 if (!port_is_started(peer_pl[j]))
2664 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
2666 printf("Error during binding hairpin"
2667 " Tx port %u to %u: %s\n",
2669 rte_strerror(-diag));
2681 stop_port(portid_t pid)
2684 struct rte_port *port;
2685 int need_check_link_status = 0;
2686 portid_t peer_pl[RTE_MAX_ETHPORTS];
2689 if (port_id_is_invalid(pid, ENABLED_WARN))
2692 printf("Stopping ports...\n");
2694 RTE_ETH_FOREACH_DEV(pi) {
2695 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2698 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2699 printf("Please remove port %d from forwarding configuration.\n", pi);
2703 if (port_is_bonding_slave(pi)) {
2704 printf("Please remove port %d from bonded device.\n", pi);
2709 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2710 RTE_PORT_HANDLING) == 0)
2713 if (hairpin_mode & 0xf) {
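/*
 * Unbind this port's hairpin Tx queues first; per the ethdev hairpin API,
 * passing RTE_MAX_ETHPORTS as the peer unbinds them from all Rx ports.
 */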
2716 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
2717 /* unbind all peer Tx from current Rx */
2718 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2719 RTE_MAX_ETHPORTS, 0);
2722 for (j = 0; j < peer_pi; j++) {
2723 if (!port_is_started(peer_pl[j]))
2725 rte_eth_hairpin_unbind(peer_pl[j], pi);
2729 if (port->flow_list)
2730 port_flow_flush(pi);
2732 if (rte_eth_dev_stop(pi) != 0)
2733 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
2736 if (rte_atomic16_cmpset(&(port->port_status),
2737 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2738 printf("Port %d can not be set into stopped\n", pi);
2739 need_check_link_status = 1;
2741 if (need_check_link_status && !no_link_check)
2742 check_all_ports_link_status(RTE_PORT_ALL);
2748 remove_invalid_ports_in(portid_t *array, portid_t *total)
2751 portid_t new_total = 0;
2753 for (i = 0; i < *total; i++)
2754 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2755 array[new_total] = array[i];
2762 remove_invalid_ports(void)
2764 remove_invalid_ports_in(ports_ids, &nb_ports);
2765 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2766 nb_cfg_ports = nb_fwd_ports;
2770 close_port(portid_t pid)
2773 struct rte_port *port;
2775 if (port_id_is_invalid(pid, ENABLED_WARN))
2778 printf("Closing ports...\n");
2780 RTE_ETH_FOREACH_DEV(pi) {
2781 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2784 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2785 printf("Please remove port %d from forwarding configuration.\n", pi);
2789 if (port_is_bonding_slave(pi)) {
2790 printf("Please remove port %d from bonded device.\n", pi);
2795 if (rte_atomic16_cmpset(&(port->port_status),
2796 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2797 printf("Port %d is already closed\n", pi);
2801 port_flow_flush(pi);
2802 rte_eth_dev_close(pi);
2805 remove_invalid_ports();
2810 reset_port(portid_t pid)
2814 struct rte_port *port;
2816 if (port_id_is_invalid(pid, ENABLED_WARN))
2819 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2820 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2821 printf("Can not reset port(s), please stop port(s) first.\n");
2825 printf("Resetting ports...\n");
2827 RTE_ETH_FOREACH_DEV(pi) {
2828 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2831 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2832 printf("Please remove port %d from forwarding "
2833 "configuration.\n", pi);
2837 if (port_is_bonding_slave(pi)) {
2838 printf("Please remove port %d from bonded device.\n",
2843 diag = rte_eth_dev_reset(pi);
2846 port->need_reconfig = 1;
2847 port->need_reconfig_queues = 1;
2849 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2857 attach_port(char *identifier)
2860 struct rte_dev_iterator iterator;
2862 printf("Attaching a new port...\n");
2864 if (identifier == NULL) {
2865 printf("Invalid parameters are specified\n");
2869 if (rte_dev_probe(identifier) < 0) {
2870 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2874 /* first attach mode: event */
2875 if (setup_on_probe_event) {
2876 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2877 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2878 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2879 ports[pi].need_setup != 0)
2880 setup_attached_port(pi);
2884 /* second attach mode: iterator */
2885 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2886 /* setup ports matching the devargs used for probing */
2887 if (port_is_forwarding(pi))
2888 continue; /* port was already attached before */
2889 setup_attached_port(pi);
2894 setup_attached_port(portid_t pi)
2896 unsigned int socket_id;
2899 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2900 /* if socket_id is invalid, set to the first available socket. */
2901 if (check_socket_id(socket_id) < 0)
2902 socket_id = socket_ids[0];
2903 reconfig(pi, socket_id);
2904 ret = rte_eth_promiscuous_enable(pi);
2906 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
2907 pi, rte_strerror(-ret));
2909 ports_ids[nb_ports++] = pi;
2910 fwd_ports_ids[nb_fwd_ports++] = pi;
2911 nb_cfg_ports = nb_fwd_ports;
2912 ports[pi].need_setup = 0;
2913 ports[pi].port_status = RTE_PORT_STOPPED;
2915 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2920 detach_device(struct rte_device *dev)
2925 printf("Device already removed\n");
2929 printf("Removing a device...\n");
2931 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2932 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2933 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
2934 printf("Port %u not stopped\n", sibling);
2937 port_flow_flush(sibling);
2941 if (rte_dev_remove(dev) < 0) {
2942 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2945 remove_invalid_ports();
2947 printf("Device is detached\n");
2948 printf("Now total ports is %d\n", nb_ports);
2954 detach_port_device(portid_t port_id)
2956 if (port_id_is_invalid(port_id, ENABLED_WARN))
2959 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2960 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2961 printf("Port not stopped\n");
2964 printf("Port was not closed\n");
2967 detach_device(rte_eth_devices[port_id].device);
2971 detach_devargs(char *identifier)
2973 struct rte_dev_iterator iterator;
2974 struct rte_devargs da;
2977 printf("Removing a device...\n");
2979 memset(&da, 0, sizeof(da));
2980 if (rte_devargs_parsef(&da, "%s", identifier)) {
2981 printf("cannot parse identifier\n");
2985 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
2986 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2987 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2988 printf("Port %u not stopped\n", port_id);
2989 rte_eth_iterator_cleanup(&iterator);
2990 rte_devargs_reset(&da);
2993 port_flow_flush(port_id);
2997 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
2998 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
2999 da.name, da.bus->name);
3000 rte_devargs_reset(&da);
3004 remove_invalid_ports();
3006 printf("Device %s is detached\n", identifier);
3007 printf("Now total ports is %d\n", nb_ports);
3009 rte_devargs_reset(&da);
3020 stop_packet_forwarding();
3022 #ifndef RTE_EXEC_ENV_WINDOWS
3023 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3025 if (mp_alloc_type == MP_ALLOC_ANON)
3026 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3031 if (ports != NULL) {
3033 RTE_ETH_FOREACH_DEV(pt_id) {
3034 printf("\nStopping port %d...\n", pt_id);
3038 RTE_ETH_FOREACH_DEV(pt_id) {
3039 printf("\nShutting down port %d...\n", pt_id);
3046 ret = rte_dev_event_monitor_stop();
3049 "fail to stop device event monitor.");
3053 ret = rte_dev_event_callback_unregister(NULL,
3054 dev_event_callback, NULL);
3057 "fail to unregister device event callback.\n");
3061 ret = rte_dev_hotplug_handle_disable();
3064 "fail to disable hotplug handling.\n");
3068 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3070 rte_mempool_free(mempools[i]);
3073 printf("\nBye...\n");
3076 typedef void (*cmd_func_t)(void);
3077 struct pmd_test_command {
3078 const char *cmd_name;
3079 cmd_func_t cmd_func;
3082 /* Check the link status of all ports for up to 9 s and print the final status */
3084 check_all_ports_link_status(uint32_t port_mask)
3086 #define CHECK_INTERVAL 100 /* 100ms */
3087 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
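/*
 * Poll every CHECK_INTERVAL ms for at most MAX_CHECK_TIME iterations.
 * Per-port status (and link-get failures) are printed only on the final
 * pass, i.e. once every polled port reports link up or the 9 s budget is
 * exhausted.
 */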
3089 uint8_t count, all_ports_up, print_flag = 0;
3090 struct rte_eth_link link;
3092 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3094 printf("Checking link statuses...\n");
3096 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3098 RTE_ETH_FOREACH_DEV(portid) {
3099 if ((port_mask & (1 << portid)) == 0)
3101 memset(&link, 0, sizeof(link));
3102 ret = rte_eth_link_get_nowait(portid, &link);
3105 if (print_flag == 1)
3106 printf("Port %u link get failed: %s\n",
3107 portid, rte_strerror(-ret));
3110 /* print link status if flag set */
3111 if (print_flag == 1) {
3112 rte_eth_link_to_str(link_status,
3113 sizeof(link_status), &link);
3114 printf("Port %d %s\n", portid, link_status);
3117 /* clear all_ports_up flag if any link down */
3118 if (link.link_status == ETH_LINK_DOWN) {
3123 /* after finally printing all link status, get out */
3124 if (print_flag == 1)
3127 if (all_ports_up == 0) {
3129 rte_delay_ms(CHECK_INTERVAL);
3132 /* set the print_flag if all ports up or timeout */
3133 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3143 rmv_port_callback(void *arg)
3145 int need_to_start = 0;
3146 int org_no_link_check = no_link_check;
3147 portid_t port_id = (intptr_t)arg;
3148 struct rte_device *dev;
3150 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3152 if (!test_done && port_is_forwarding(port_id)) {
3154 stop_packet_forwarding();
3158 no_link_check = org_no_link_check;
3160 /* Save rte_device pointer before closing ethdev port */
3161 dev = rte_eth_devices[port_id].device;
3162 close_port(port_id);
3163 detach_device(dev); /* might be already removed or have more ports */
3166 start_packet_forwarding(0);
3169 /* This function is used by the interrupt thread */
3171 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3174 RTE_SET_USED(param);
3175 RTE_SET_USED(ret_param);
3177 if (type >= RTE_ETH_EVENT_MAX) {
3178 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3179 port_id, __func__, type);
3181 } else if (event_print_mask & (UINT32_C(1) << type)) {
3182 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3183 eth_event_desc[type]);
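/*
 * Events handled below: NEW marks the port for deferred setup after probe,
 * INTR_RMV schedules rmv_port_callback() through an EAL alarm so the
 * detach happens outside this callback, and DESTROY records that the port
 * is closed.
 */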
3188 case RTE_ETH_EVENT_NEW:
3189 ports[port_id].need_setup = 1;
3190 ports[port_id].port_status = RTE_PORT_HANDLING;
3192 case RTE_ETH_EVENT_INTR_RMV:
3193 if (port_id_is_invalid(port_id, DISABLED_WARN))
3195 if (rte_eal_alarm_set(100000,
3196 rmv_port_callback, (void *)(intptr_t)port_id))
3197 fprintf(stderr, "Could not set up deferred device removal\n");
3199 case RTE_ETH_EVENT_DESTROY:
3200 ports[port_id].port_status = RTE_PORT_CLOSED;
3201 printf("Port %u is closed\n", port_id);
3210 register_eth_event_callback(void)
3213 enum rte_eth_event_type event;
3215 for (event = RTE_ETH_EVENT_UNKNOWN;
3216 event < RTE_ETH_EVENT_MAX; event++) {
3217 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3222 TESTPMD_LOG(ERR, "Failed to register callback for "
3223 "%s event\n", eth_event_desc[event]);
3231 /* This function is used by the interrupt thread */
3233 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3234 __rte_unused void *arg)
3239 if (type >= RTE_DEV_EVENT_MAX) {
3240 fprintf(stderr, "%s called upon invalid event %d\n",
3246 case RTE_DEV_EVENT_REMOVE:
3247 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3249 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3251 RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
3256 * Because the user's callback is invoked from the EAL interrupt
3257 * callback, the interrupt callback must finish before it can be
3258 * unregistered when detaching the device. So the callback finishes
3259 * quickly and a deferred removal is used to detach the device
3260 * instead. This is a workaround; once device detaching is
3261 * moved into the EAL in the future, the deferred removal could
3264 if (rte_eal_alarm_set(100000,
3265 rmv_port_callback, (void *)(intptr_t)port_id))
3267 "Could not set up deferred device removal\n");
3269 case RTE_DEV_EVENT_ADD:
3270 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3272 /* TODO: after the kernel driver binding is finished,
3273 * begin to attach the port.
3282 rxtx_port_config(struct rte_port *port)
3287 for (qid = 0; qid < nb_rxq; qid++) {
3288 offloads = port->rx_conf[qid].offloads;
3289 port->rx_conf[qid] = port->dev_info.default_rxconf;
3291 port->rx_conf[qid].offloads = offloads;
3293 /* Check if any Rx parameters have been passed */
3294 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3295 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3297 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3298 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3300 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3301 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3303 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3304 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3306 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3307 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3309 port->nb_rx_desc[qid] = nb_rxd;
3312 for (qid = 0; qid < nb_txq; qid++) {
3313 offloads = port->tx_conf[qid].offloads;
3314 port->tx_conf[qid] = port->dev_info.default_txconf;
3316 port->tx_conf[qid].offloads = offloads;
3318 /* Check if any Tx parameters have been passed */
3319 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3320 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3322 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3323 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3325 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3326 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3328 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3329 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3331 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3332 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3334 port->nb_tx_desc[qid] = nb_txd;
3339 * Helper function to keep the max_rx_pkt_len value and the JUMBO_FRAME offload
3340 * consistent; the MTU is also adjusted if the JUMBO_FRAME offload is not set.
3342 * port->dev_info must be set before calling this function.
3344 * Return 0 on success, negative on error.
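 *
 * A worked example, assuming the generic Ethernet overhead of 18 bytes
 * (RTE_ETHER_HDR_LEN 14 + RTE_ETHER_CRC_LEN 4): max_rx_pkt_len 1518
 * corresponds to the standard RTE_ETHER_MTU of 1500, so values above 1518
 * turn the JUMBO_FRAME offload on and values up to 1518 turn it off.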
3347 update_jumbo_frame_offload(portid_t portid)
3349 struct rte_port *port = &ports[portid];
3350 uint32_t eth_overhead;
3351 uint64_t rx_offloads;
3355 /* Derive the L2 overhead used to convert between MTU and max_rx_pkt_len */
3356 if (port->dev_info.max_mtu != UINT16_MAX &&
3357 port->dev_info.max_rx_pktlen > port->dev_info.max_mtu)
3358 eth_overhead = port->dev_info.max_rx_pktlen -
3359 port->dev_info.max_mtu;
3361 eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3363 rx_offloads = port->dev_conf.rxmode.offloads;
3365 /* Default config value is 0 to use PMD specific overhead */
3366 if (port->dev_conf.rxmode.max_rx_pkt_len == 0)
3367 port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead;
3369 if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) {
3370 rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3373 if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
3374 printf("Frame size (%u) is not supported by port %u\n",
3375 port->dev_conf.rxmode.max_rx_pkt_len,
3379 rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
3383 if (rx_offloads != port->dev_conf.rxmode.offloads) {
3386 port->dev_conf.rxmode.offloads = rx_offloads;
3388 /* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
3389 for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
3391 port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
3393 port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3397 /* If JUMBO_FRAME is set, the MTU conversion is done by the ethdev layer;
3398 * if unset, do it here.
3400 if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
3401 ret = rte_eth_dev_set_mtu(portid,
3402 port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead);
3404 printf("Failed to set MTU to %u for port %u\n",
3405 port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead,
3413 init_port_config(void)
3416 struct rte_port *port;
3419 RTE_ETH_FOREACH_DEV(pid) {
3421 port->dev_conf.fdir_conf = fdir_conf;
3423 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3428 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3429 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3430 rss_hf & port->dev_info.flow_type_rss_offloads;
3432 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3433 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3436 if (port->dcb_flag == 0) {
3437 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3438 port->dev_conf.rxmode.mq_mode =
3439 (enum rte_eth_rx_mq_mode)
3440 (rx_mq_mode & ETH_MQ_RX_RSS);
3442 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3445 rxtx_port_config(port);
3447 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3451 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3452 rte_pmd_ixgbe_bypass_init(pid);
3455 if (lsc_interrupt &&
3456 (rte_eth_devices[pid].data->dev_flags &
3457 RTE_ETH_DEV_INTR_LSC))
3458 port->dev_conf.intr_conf.lsc = 1;
3459 if (rmv_interrupt &&
3460 (rte_eth_devices[pid].data->dev_flags &
3461 RTE_ETH_DEV_INTR_RMV))
3462 port->dev_conf.intr_conf.rmv = 1;
3466 void set_port_slave_flag(portid_t slave_pid)
3468 struct rte_port *port;
3470 port = &ports[slave_pid];
3471 port->slave_flag = 1;
3474 void clear_port_slave_flag(portid_t slave_pid)
3476 struct rte_port *port;
3478 port = &ports[slave_pid];
3479 port->slave_flag = 0;
3482 uint8_t port_is_bonding_slave(portid_t slave_pid)
3484 struct rte_port *port;
3486 port = &ports[slave_pid];
3487 if ((rte_eth_devices[slave_pid].data->dev_flags &
3488 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3493 const uint16_t vlan_tags[] = {
3494 0, 1, 2, 3, 4, 5, 6, 7,
3495 8, 9, 10, 11, 12, 13, 14, 15,
3496 16, 17, 18, 19, 20, 21, 22, 23,
3497 24, 25, 26, 27, 28, 29, 30, 31
3501 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3502 enum dcb_mode_enable dcb_mode,
3503 enum rte_eth_nb_tcs num_tcs,
3508 struct rte_eth_rss_conf rss_conf;
3511 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3512 * given above, and the number of traffic classes available for use.
3514 if (dcb_mode == DCB_VT_ENABLED) {
3515 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3516 ð_conf->rx_adv_conf.vmdq_dcb_conf;
3517 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3518 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3520 /* VMDQ+DCB RX and TX configurations */
3521 vmdq_rx_conf->enable_default_pool = 0;
3522 vmdq_rx_conf->default_pool = 0;
3523 vmdq_rx_conf->nb_queue_pools =
3524 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3525 vmdq_tx_conf->nb_queue_pools =
3526 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3528 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
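/*
 * Example mapping, assuming num_tcs == ETH_4_TCS: 32 VMDQ pools are used,
 * VLAN tag vlan_tags[i] steers traffic to pool i % 32, and each of the
 * 8 user priorities is mapped to traffic class i % 4.
 */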
3529 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3530 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3531 vmdq_rx_conf->pool_map[i].pools =
3532 1 << (i % vmdq_rx_conf->nb_queue_pools);
3534 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3535 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3536 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3539 /* set DCB mode of RX and TX of multiple queues */
3540 eth_conf->rxmode.mq_mode =
3541 (enum rte_eth_rx_mq_mode)
3542 (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3543 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3545 struct rte_eth_dcb_rx_conf *rx_conf =
3546 ð_conf->rx_adv_conf.dcb_rx_conf;
3547 struct rte_eth_dcb_tx_conf *tx_conf =
3548 ð_conf->tx_adv_conf.dcb_tx_conf;
3550 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3552 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3556 rx_conf->nb_tcs = num_tcs;
3557 tx_conf->nb_tcs = num_tcs;
3559 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3560 rx_conf->dcb_tc[i] = i % num_tcs;
3561 tx_conf->dcb_tc[i] = i % num_tcs;
3564 eth_conf->rxmode.mq_mode =
3565 (enum rte_eth_rx_mq_mode)
3566 (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3567 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3568 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3572 eth_conf->dcb_capability_en =
3573 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3575 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
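/*
 * Reconfigure one port for DCB testing: build a DCB (or VMDQ+DCB)
 * rte_eth_conf via get_eth_dcb_conf(), re-run rte_eth_dev_configure(),
 * adjust nb_rxq/nb_txq to the number of traffic classes or available
 * queues, and install VLAN filters for every entry in vlan_tags.
 */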
3581 init_port_dcb_config(portid_t pid,
3582 enum dcb_mode_enable dcb_mode,
3583 enum rte_eth_nb_tcs num_tcs,
3586 struct rte_eth_conf port_conf;
3587 struct rte_port *rte_port;
3591 rte_port = &ports[pid];
3593 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3595 port_conf.rxmode = rte_port->dev_conf.rxmode;
3596 port_conf.txmode = rte_port->dev_conf.txmode;
3598 /* set the configuration of DCB in VT mode and DCB in non-VT mode */
3599 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3602 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3604 /* re-configure the device. */
3605 retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
3609 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3613 /* If dev_info.vmdq_pool_base is greater than 0,
3614 * the queue IDs of the VMDQ pools start after the PF queues.
3616 if (dcb_mode == DCB_VT_ENABLED &&
3617 rte_port->dev_info.vmdq_pool_base > 0) {
3618 printf("VMDQ_DCB multi-queue mode is nonsensical"
3619 " for port %d.", pid);
3623 /* Assume the ports in testpmd have the same dcb capability
3624 * and has the same number of rxq and txq in dcb mode
3626 if (dcb_mode == DCB_VT_ENABLED) {
3627 if (rte_port->dev_info.max_vfs > 0) {
3628 nb_rxq = rte_port->dev_info.nb_rx_queues;
3629 nb_txq = rte_port->dev_info.nb_tx_queues;
3631 nb_rxq = rte_port->dev_info.max_rx_queues;
3632 nb_txq = rte_port->dev_info.max_tx_queues;
3635 /* if VT is disabled, use all PF queues */
3636 if (rte_port->dev_info.vmdq_pool_base == 0) {
3637 nb_rxq = rte_port->dev_info.max_rx_queues;
3638 nb_txq = rte_port->dev_info.max_tx_queues;
3640 nb_rxq = (queueid_t)num_tcs;
3641 nb_txq = (queueid_t)num_tcs;
3645 rx_free_thresh = 64;
3647 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3649 rxtx_port_config(rte_port);
3651 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3652 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3653 rx_vft_set(pid, vlan_tags[i], 1);
3655 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3659 rte_port->dcb_flag = 1;
3661 /* Enter DCB configuration status */
3672 /* Configuration of Ethernet ports. */
3673 ports = rte_zmalloc("testpmd: ports",
3674 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3675 RTE_CACHE_LINE_SIZE);
3676 if (ports == NULL) {
3677 rte_exit(EXIT_FAILURE,
3678 "rte_zmalloc(%d struct rte_port) failed\n",
3681 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3682 LIST_INIT(&ports[i].flow_tunnel_list);
3683 /* Initialize ports NUMA structures */
3684 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3685 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3686 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3700 const char clr[] = { 27, '[', '2', 'J', '\0' };
3701 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
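/* ANSI escape sequences: ESC "[2J" clears the screen and ESC "[1;1H"
 * moves the cursor to row 1, column 1.
 */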
3703 /* Clear screen and move to top left */
3704 printf("%s%s", clr, top_left);
3706 printf("\nPort statistics ====================================");
3707 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3708 nic_stats_display(fwd_ports_ids[i]);
3714 signal_handler(int signum)
3716 if (signum == SIGINT || signum == SIGTERM) {
3717 printf("\nSignal %d received, preparing to exit...\n",
3719 #ifdef RTE_LIB_PDUMP
3720 /* uninitialize packet capture framework */
3723 #ifdef RTE_LIB_LATENCYSTATS
3724 if (latencystats_enabled != 0)
3725 rte_latencystats_uninit();
3728 /* Set flag to indicate the force termination. */
3730 /* exit with the expected status */
3731 #ifndef RTE_EXEC_ENV_WINDOWS
3732 signal(signum, SIG_DFL);
3733 kill(getpid(), signum);
3739 main(int argc, char** argv)
3746 signal(SIGINT, signal_handler);
3747 signal(SIGTERM, signal_handler);
3749 testpmd_logtype = rte_log_register("testpmd");
3750 if (testpmd_logtype < 0)
3751 rte_exit(EXIT_FAILURE, "Cannot register log type");
3752 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3754 diag = rte_eal_init(argc, argv);
3756 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3757 rte_strerror(rte_errno));
3759 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3760 rte_exit(EXIT_FAILURE,
3761 "Secondary process type not supported.\n");
3763 ret = register_eth_event_callback();
3765 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3767 #ifdef RTE_LIB_PDUMP
3768 /* initialize packet capture framework */
3773 RTE_ETH_FOREACH_DEV(port_id) {
3774 ports_ids[count] = port_id;
3777 nb_ports = (portid_t) count;
3779 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3781 /* allocate port structures, and init them */
3784 set_def_fwd_config();
3786 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3787 "Check the core mask argument\n");
3789 /* Bitrate/latency stats disabled by default */
3790 #ifdef RTE_LIB_BITRATESTATS
3791 bitrate_enabled = 0;
3793 #ifdef RTE_LIB_LATENCYSTATS
3794 latencystats_enabled = 0;
3797 /* on FreeBSD, mlockall() is disabled by default */
3798 #ifdef RTE_EXEC_ENV_FREEBSD
3807 launch_args_parse(argc, argv);
3809 #ifndef RTE_EXEC_ENV_WINDOWS
3810 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3811 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3816 if (tx_first && interactive)
3817 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3818 "interactive mode.\n");
3820 if (tx_first && lsc_interrupt) {
3821 printf("Warning: lsc_interrupt needs to be off when "
3822 " using tx_first. Disabling.\n");
3826 if (!nb_rxq && !nb_txq)
3827 printf("Warning: Either rx or tx queues should be non-zero\n");
3829 if (nb_rxq > 1 && nb_rxq > nb_txq)
3830 printf("Warning: nb_rxq=%d enables RSS configuration, "
3831 "but nb_txq=%d will prevent to fully test it.\n",
3837 ret = rte_dev_hotplug_handle_enable();
3840 "fail to enable hotplug handling.");
3844 ret = rte_dev_event_monitor_start();
3847 "fail to start device event monitoring.");
3851 ret = rte_dev_event_callback_register(NULL,
3852 dev_event_callback, NULL);
3855 "fail to register device event callback\n");
3860 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3861 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3863 /* set all ports to promiscuous mode by default */
3864 RTE_ETH_FOREACH_DEV(port_id) {
3865 ret = rte_eth_promiscuous_enable(port_id);
3867 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3868 port_id, rte_strerror(-ret));
3871 /* Init metrics library */
3872 rte_metrics_init(rte_socket_id());
3874 #ifdef RTE_LIB_LATENCYSTATS
3875 if (latencystats_enabled != 0) {
3876 int ret = rte_latencystats_init(1, NULL);
3878 printf("Warning: latencystats init()"
3879 " returned error %d\n", ret);
3880 printf("Latencystats running on lcore %d\n",
3881 latencystats_lcore_id);
3885 /* Setup bitrate stats */
3886 #ifdef RTE_LIB_BITRATESTATS
3887 if (bitrate_enabled != 0) {
3888 bitrate_data = rte_stats_bitrate_create();
3889 if (bitrate_data == NULL)
3890 rte_exit(EXIT_FAILURE,
3891 "Could not allocate bitrate data.\n");
3892 rte_stats_bitrate_reg(bitrate_data);
3896 #ifdef RTE_LIB_CMDLINE
3897 if (strlen(cmdline_filename) != 0)
3898 cmdline_read_from_file(cmdline_filename);
3900 if (interactive == 1) {
3902 printf("Start automatic packet forwarding\n");
3903 start_packet_forwarding(0);
3915 printf("No commandline core given, start packet forwarding\n");
3916 start_packet_forwarding(tx_first);
3917 if (stats_period != 0) {
3918 uint64_t prev_time = 0, cur_time, diff_time = 0;
3919 uint64_t timer_period;
3921 /* Convert to number of cycles */
3922 timer_period = stats_period * rte_get_timer_hz();
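/*
 * stats_period is given in seconds; for example, with --stats-period 5 and
 * a 2.5 GHz timer, timer_period is 12.5e9 cycles, so the statistics below
 * refresh roughly every 5 seconds of wall-clock time.
 */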
3924 while (f_quit == 0) {
3925 cur_time = rte_get_timer_cycles();
3926 diff_time += cur_time - prev_time;
3928 if (diff_time >= timer_period) {
3930 /* Reset the timer */
3933 /* Sleep to avoid unnecessary checks */
3934 prev_time = cur_time;
3935 rte_delay_us_sleep(US_PER_S);
3939 printf("Press enter to exit\n");
3940 rc = read(0, &c, 1);
3946 ret = rte_eal_cleanup();
3948 rte_exit(EXIT_FAILURE,
3949 "EAL cleanup failed: %s\n", strerror(-ret));
3951 return EXIT_SUCCESS;