1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc_heap.h>
31 #include <rte_memory.h>
32 #include <rte_memcpy.h>
33 #include <rte_launch.h>
35 #include <rte_alarm.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_atomic.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
50 #ifdef RTE_LIBRTE_IXGBE_PMD
51 #include <rte_pmd_ixgbe.h>
53 #ifdef RTE_LIBRTE_PDUMP
54 #include <rte_pdump.h>
57 #include <rte_metrics.h>
58 #ifdef RTE_LIBRTE_BITRATE
59 #include <rte_bitrate.h>
61 #ifdef RTE_LIBRTE_LATENCY_STATS
62 #include <rte_latencystats.h>
68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69 #define HUGE_FLAG (0x40000)
71 #define HUGE_FLAG MAP_HUGETLB
74 #ifndef MAP_HUGE_SHIFT
75 /* older kernels (or FreeBSD) will not have this define */
76 #define HUGE_SHIFT (26)
78 #define HUGE_SHIFT MAP_HUGE_SHIFT
81 #define EXTMEM_HEAP_NAME "extmem"
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* use the master core for the command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, the NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports among the CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
102 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
117 * Store specified sockets on which memory pool to be used by ports
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123 * Store specified sockets on which RX ring to be used by ports
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129 * Store specified sockets on which TX ring to be used by ports
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet address of peer target ports to which packets are
137 * Must be instantiated with the ethernet addresses of peer traffic generator
140 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
181 #if defined RTE_LIBRTE_PMD_SOFTNIC
184 #ifdef RTE_LIBRTE_IEEE1588
185 &ieee1588_fwd_engine,
190 struct fwd_config cur_fwd_config;
191 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
192 uint32_t retry_enabled;
193 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
194 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
196 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
197 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
198 * specified on command-line. */
199 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
202 * In a container, a process running with the 'stats-period' option cannot
203 * be terminated. Set a flag to exit the stats period loop after receiving SIGINT/SIGTERM.
208 * Configuration of packet segments used by the "txonly" processing engine.
210 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
211 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
212 TXONLY_DEF_PACKET_LEN,
214 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
216 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
217 /**< Split policy for packets to TX. */
219 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
220 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
222 /* whether the current configuration is in DCB mode; 0 means it is not */
223 uint8_t dcb_config = 0;
225 /* Whether the dcb is in testing status */
226 uint8_t dcb_test = 0;
229 * Configurable number of RX/TX queues.
231 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
232 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
235 * Configurable number of RX/TX ring descriptors.
236 * Defaults are supplied by drivers via ethdev.
238 #define RTE_TEST_RX_DESC_DEFAULT 0
239 #define RTE_TEST_TX_DESC_DEFAULT 0
240 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
241 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
243 #define RTE_PMD_PARAM_UNSET -1
245 * Configurable values of RX and TX ring threshold registers.
248 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
249 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
250 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
252 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
253 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
254 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
257 * Configurable value of RX free threshold.
259 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
262 * Configurable value of RX drop enable.
264 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
267 * Configurable value of TX free threshold.
269 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
272 * Configurable value of TX RS bit threshold.
274 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
277 * Receive Side Scaling (RSS) configuration.
279 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
282 * Port topology configuration
284 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
287 * Avoid flushing all the RX streams before starting forwarding.
289 uint8_t no_flush_rx = 0; /* flush by default */
292 * Flow API isolated mode.
294 uint8_t flow_isolate_all;
297 * Avoid checking link status when starting/stopping a port.
299 uint8_t no_link_check = 0; /* check by default */
302 * Enable link status change notification
304 uint8_t lsc_interrupt = 1; /* enabled by default */
307 * Enable device removal notification.
309 uint8_t rmv_interrupt = 1; /* enabled by default */
311 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
314 * Display or mask ether events
315 * Default to all events except VF_MBOX
317 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
318 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
319 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
320 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
321 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
322 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
323 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
325 * Decide if all memory is locked for performance.
330 * NIC bypass mode configuration options.
333 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
334 /* The NIC bypass watchdog timeout. */
335 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
339 #ifdef RTE_LIBRTE_LATENCY_STATS
342 * Set when latency stats is enabled in the commandline
344 uint8_t latencystats_enabled;
347 * Lcore ID to serve latency statistics.
349 lcoreid_t latencystats_lcore_id = -1;
354 * Ethernet device configuration.
356 struct rte_eth_rxmode rx_mode = {
357 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
360 struct rte_eth_txmode tx_mode = {
361 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
364 struct rte_fdir_conf fdir_conf = {
365 .mode = RTE_FDIR_MODE_NONE,
366 .pballoc = RTE_FDIR_PBALLOC_64K,
367 .status = RTE_FDIR_REPORT_STATUS,
369 .vlan_tci_mask = 0xFFEF,
371 .src_ip = 0xFFFFFFFF,
372 .dst_ip = 0xFFFFFFFF,
375 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
376 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
378 .src_port_mask = 0xFFFF,
379 .dst_port_mask = 0xFFFF,
380 .mac_addr_byte_mask = 0xFF,
381 .tunnel_type_mask = 1,
382 .tunnel_id_mask = 0xFFFFFFFF,
387 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
389 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
390 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
392 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
393 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
395 uint16_t nb_tx_queue_stats_mappings = 0;
396 uint16_t nb_rx_queue_stats_mappings = 0;
399 * Display zero values by default for xstats
401 uint8_t xstats_hide_zero;
403 unsigned int num_sockets = 0;
404 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
406 #ifdef RTE_LIBRTE_BITRATE
407 /* Bitrate statistics */
408 struct rte_stats_bitrates *bitrate_data;
409 lcoreid_t bitrate_lcore_id;
410 uint8_t bitrate_enabled;
413 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
414 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
416 struct vxlan_encap_conf vxlan_encap_conf = {
419 .vni = "\x00\x00\x00",
421 .udp_dst = RTE_BE16(4789),
422 .ipv4_src = IPv4(127, 0, 0, 1),
423 .ipv4_dst = IPv4(255, 255, 255, 255),
424 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
425 "\x00\x00\x00\x00\x00\x00\x00\x01",
426 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
427 "\x00\x00\x00\x00\x00\x00\x11\x11",
429 .eth_src = "\x00\x00\x00\x00\x00\x00",
430 .eth_dst = "\xff\xff\xff\xff\xff\xff",
433 struct nvgre_encap_conf nvgre_encap_conf = {
436 .tni = "\x00\x00\x00",
437 .ipv4_src = IPv4(127, 0, 0, 1),
438 .ipv4_dst = IPv4(255, 255, 255, 255),
439 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
440 "\x00\x00\x00\x00\x00\x00\x00\x01",
441 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
442 "\x00\x00\x00\x00\x00\x00\x11\x11",
444 .eth_src = "\x00\x00\x00\x00\x00\x00",
445 .eth_dst = "\xff\xff\xff\xff\xff\xff",
448 /* Forward function declarations */
449 static void map_port_queue_stats_mapping_registers(portid_t pi,
450 struct rte_port *port);
451 static void check_all_ports_link_status(uint32_t port_mask);
452 static int eth_event_callback(portid_t port_id,
453 enum rte_eth_event_type type,
454 void *param, void *ret_param);
455 static void eth_dev_event_callback(char *device_name,
456 enum rte_dev_event_type type,
458 static int eth_dev_event_callback_register(void);
459 static int eth_dev_event_callback_unregister(void);
463 * Check if all the ports are started.
464 * If yes, return positive value. If not, return zero.
466 static int all_ports_started(void);
468 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
469 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
472 * Helper function to check if socket is already discovered.
473 * If yes, return positive value. If not, return zero.
476 new_socket_id(unsigned int socket_id)
	/* linear scan over the sockets discovered so far; a hit means the
	 * socket id is already known */
480 	for (i = 0; i < num_sockets; i++) {
481 		if (socket_ids[i] == socket_id)
488 * Setup default configuration.
491 set_default_fwd_lcores_config(void)
495 unsigned int sock_num;
498 for (i = 0; i < RTE_MAX_LCORE; i++) {
499 if (!rte_lcore_is_enabled(i))
501 sock_num = rte_lcore_to_socket_id(i);
502 if (new_socket_id(sock_num)) {
503 if (num_sockets >= RTE_MAX_NUMA_NODES) {
504 rte_exit(EXIT_FAILURE,
505 "Total sockets greater than %u\n",
508 socket_ids[num_sockets++] = sock_num;
510 if (i == rte_get_master_lcore())
512 fwd_lcores_cpuids[nb_lc++] = i;
514 nb_lcores = (lcoreid_t) nb_lc;
515 nb_cfg_lcores = nb_lcores;
520 set_def_peer_eth_addrs(void)
524 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
525 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
526 peer_eth_addrs[i].addr_bytes[5] = i;
531 set_default_fwd_ports_config(void)
536 RTE_ETH_FOREACH_DEV(pt_id)
537 fwd_ports_ids[i++] = pt_id;
539 nb_cfg_ports = nb_ports;
540 nb_fwd_ports = nb_ports;
/* Build the default forwarding configuration: forwarding lcores,
 * peer Ethernet addresses, and forwarding ports. */
544 set_def_fwd_config(void)
546 	set_default_fwd_lcores_config();
547 	set_def_peer_eth_addrs();
548 	set_default_fwd_ports_config();
551 /* extremely pessimistic estimation of memory required to create a mempool */
/*
 * Worst-case estimate of the memory (bytes) needed to back a mempool of
 * nb_mbufs objects of mbuf_sz bytes, using pages of pgsz bytes.
 * The page-aligned result is stored in *out.
 */
553 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
555 	unsigned int n_pages, mbuf_per_pg, leftover;
556 	uint64_t total_mem, mbuf_mem, obj_sz;
558 	/* there is no good way to predict how much space the mempool will
559 	 * occupy because it will allocate chunks on the fly, and some of those
560 	 * will come from default DPDK memory while some will come from our
561 	 * external memory, so just assume 128MB will be enough for everyone.
563 	uint64_t hdr_mem = 128 << 20;
565 	/* account for possible non-contiguousness */
566 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	/* NOTE(review): the guard condition for the error below is not visible
	 * in this capture; presumably it fires when obj_sz > pgsz — confirm
	 * against the full source. */
568 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
	/* objects per page; round the page count up for any remainder */
572 	mbuf_per_pg = pgsz / obj_sz;
573 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
574 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
576 	mbuf_mem = n_pages * pgsz;
	/* total = mbuf backing + fixed mempool-header allowance, page-aligned */
578 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
	/* guard against a 64-bit total that cannot be represented in size_t */
580 	if (total_mem > SIZE_MAX) {
581 		TESTPMD_LOG(ERR, "Memory size too big\n");
584 	*out = (size_t)total_mem;
589 static inline uint32_t
592 return (uint32_t)__builtin_ctzll(v);
595 static inline uint32_t
600 v = rte_align64pow2(v);
/*
 * Encode a hugepage size into the MAP_HUGE_* flag bits expected by mmap().
 */
605 pagesz_flags(uint64_t page_sz)
607 	/* as per mmap() manpage, all page sizes are log2 of page size
608 	 * shifted by MAP_HUGE_SHIFT
610 	int log2 = log2_u64(page_sz);
612 	return (log2 << HUGE_SHIFT);
/*
 * mmap() an anonymous private region of memsz bytes; when 'huge' is set,
 * also request hugepages of size pgsz via HUGE_FLAG plus the encoded
 * page-size flags. Returns the mapping address (MAP_FAILED checked below).
 */
616 alloc_mem(size_t memsz, size_t pgsz, bool huge)
621 	/* allocate anonymous hugepages */
622 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	/* NOTE(review): the 'if (huge)' guard for the next line is not visible
	 * in this capture — confirm against the full source. */
624 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
626 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
627 	if (addr == MAP_FAILED)
633 struct extmem_param {
637 rte_iova_t *iova_table;
638 unsigned int iova_table_len;
642 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
645 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
646 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
647 unsigned int cur_page, n_pages, pgsz_idx;
648 size_t mem_sz, cur_pgsz;
649 rte_iova_t *iovas = NULL;
653 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
654 /* skip anything that is too big */
655 if (pgsizes[pgsz_idx] > SIZE_MAX)
658 cur_pgsz = pgsizes[pgsz_idx];
660 /* if we were told not to allocate hugepages, override */
662 cur_pgsz = sysconf(_SC_PAGESIZE);
664 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
666 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
670 /* allocate our memory */
671 addr = alloc_mem(mem_sz, cur_pgsz, huge);
673 /* if we couldn't allocate memory with a specified page size,
674 * that doesn't mean we can't do it with other page sizes, so
680 /* store IOVA addresses for every page in this memory area */
681 n_pages = mem_sz / cur_pgsz;
683 iovas = malloc(sizeof(*iovas) * n_pages);
686 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
689 /* lock memory if it's not huge pages */
693 /* populate IOVA addresses */
694 for (cur_page = 0; cur_page < n_pages; cur_page++) {
699 offset = cur_pgsz * cur_page;
700 cur = RTE_PTR_ADD(addr, offset);
702 /* touch the page before getting its IOVA */
703 *(volatile char *)cur = 0;
705 iova = rte_mem_virt2iova(cur);
707 iovas[cur_page] = iova;
712 /* if we couldn't allocate anything */
718 param->pgsz = cur_pgsz;
719 param->iova_table = iovas;
720 param->iova_table_len = n_pages;
727 munmap(addr, mem_sz);
733 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
735 struct extmem_param param;
738 memset(¶m, 0, sizeof(param));
740 /* check if our heap exists */
741 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
743 /* create our heap */
744 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
746 TESTPMD_LOG(ERR, "Cannot create heap\n");
751 ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge);
753 TESTPMD_LOG(ERR, "Cannot create memory area\n");
757 /* we now have a valid memory area, so add it to heap */
758 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
759 param.addr, param.len, param.iova_table,
760 param.iova_table_len, param.pgsz);
762 /* when using VFIO, memory is automatically mapped for DMA by EAL */
764 /* not needed any more */
765 free(param.iova_table);
768 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
769 munmap(param.addr, param.len);
775 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
782 * Configuration initialisation done once at init time.
785 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
786 unsigned int socket_id)
788 char pool_name[RTE_MEMPOOL_NAMESIZE];
789 struct rte_mempool *rte_mp = NULL;
792 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
793 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
796 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
797 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
799 switch (mp_alloc_type) {
800 case MP_ALLOC_NATIVE:
802 /* wrapper to rte_mempool_create() */
803 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
804 rte_mbuf_best_mempool_ops());
805 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
806 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
811 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
812 mb_size, (unsigned int) mb_mempool_cache,
813 sizeof(struct rte_pktmbuf_pool_private),
818 if (rte_mempool_populate_anon(rte_mp) == 0) {
819 rte_mempool_free(rte_mp);
823 rte_pktmbuf_pool_init(rte_mp, NULL);
824 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
828 case MP_ALLOC_XMEM_HUGE:
831 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
833 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
834 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
837 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
839 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
841 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
842 rte_mbuf_best_mempool_ops());
843 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
844 mb_mempool_cache, 0, mbuf_seg_size,
850 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
855 if (rte_mp == NULL) {
856 rte_exit(EXIT_FAILURE,
857 "Creation of mbuf pool for socket %u failed: %s\n",
858 socket_id, rte_strerror(rte_errno));
859 } else if (verbose_level > 0) {
860 rte_mempool_dump(stdout, rte_mp);
865 * Check whether the given socket id is valid in NUMA mode;
866 * return 0 if valid, -1 otherwise.
869 check_socket_id(const unsigned int socket_id)
	/* warn at most once per run */
871 	static int warning_once = 0;
	/* a socket id never seen during lcore discovery is suspect under NUMA;
	 * tell the user to configure the NUMA mapping explicitly */
873 	if (new_socket_id(socket_id)) {
874 		if (!warning_once && numa_support)
875 			printf("Warning: NUMA should be configured manually by"
876 				" using --port-numa-config and"
877 				" --ring-numa-config parameters along with"
886 * Get the allowed maximum number of RX queues.
887 * *pid return the port id which has minimal value of
888 * max_rx_queues in all ports.
891 get_allowed_max_nb_rxq(portid_t *pid)
893 	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
895 	struct rte_eth_dev_info dev_info;
	/* take the minimum of max_rx_queues across all probed ports; *pid is
	 * presumably set to the limiting port — that assignment is not visible
	 * in this capture, confirm against the full source */
897 	RTE_ETH_FOREACH_DEV(pi) {
898 		rte_eth_dev_info_get(pi, &dev_info);
899 		if (dev_info.max_rx_queues < allowed_max_rxq) {
900 			allowed_max_rxq = dev_info.max_rx_queues;
904 	return allowed_max_rxq;
908 * Check input rxq is valid or not.
909 * If input rxq is not greater than any of maximum number
910 * of RX queues of all ports, it is valid.
911 * if valid, return 0, else return -1
/*
 * Validate a requested per-port RX queue count against the smallest
 * max_rx_queues among all probed ports.
 */
914 check_nb_rxq(queueid_t rxq)
916 	queueid_t allowed_max_rxq;
919 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
920 	if (rxq > allowed_max_rxq) {
921 		printf("Fail: input rxq (%u) can't be greater "
922 			"than max_rx_queues (%u) of port %u\n",
932 * Get the allowed maximum number of TX queues.
933 * *pid return the port id which has minimal value of
934 * max_tx_queues in all ports.
937 get_allowed_max_nb_txq(portid_t *pid)
939 	queueid_t allowed_max_txq = MAX_QUEUE_ID;
941 	struct rte_eth_dev_info dev_info;
	/* take the minimum of max_tx_queues across all probed ports; *pid is
	 * presumably set to the limiting port — that assignment is not visible
	 * in this capture, confirm against the full source */
943 	RTE_ETH_FOREACH_DEV(pi) {
944 		rte_eth_dev_info_get(pi, &dev_info);
945 		if (dev_info.max_tx_queues < allowed_max_txq) {
946 			allowed_max_txq = dev_info.max_tx_queues;
950 	return allowed_max_txq;
954 * Check input txq is valid or not.
955 * If input txq is not greater than any of maximum number
956 * of TX queues of all ports, it is valid.
957 * if valid, return 0, else return -1
/*
 * Validate a requested per-port TX queue count against the smallest
 * max_tx_queues among all probed ports.
 */
960 check_nb_txq(queueid_t txq)
962 	queueid_t allowed_max_txq;
965 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
966 	if (txq > allowed_max_txq) {
967 		printf("Fail: input txq (%u) can't be greater "
968 			"than max_tx_queues (%u) of port %u\n",
981 struct rte_port *port;
982 struct rte_mempool *mbp;
983 unsigned int nb_mbuf_per_pool;
985 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
986 struct rte_gro_param gro_param;
990 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
993 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
994 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
995 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
998 /* Configuration of logical cores. */
999 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1000 sizeof(struct fwd_lcore *) * nb_lcores,
1001 RTE_CACHE_LINE_SIZE);
1002 if (fwd_lcores == NULL) {
1003 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1004 "failed\n", nb_lcores);
1006 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1007 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1008 sizeof(struct fwd_lcore),
1009 RTE_CACHE_LINE_SIZE);
1010 if (fwd_lcores[lc_id] == NULL) {
1011 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1014 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1017 RTE_ETH_FOREACH_DEV(pid) {
1019 /* Apply default TxRx configuration for all ports */
1020 port->dev_conf.txmode = tx_mode;
1021 port->dev_conf.rxmode = rx_mode;
1022 rte_eth_dev_info_get(pid, &port->dev_info);
1024 if (!(port->dev_info.tx_offload_capa &
1025 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1026 port->dev_conf.txmode.offloads &=
1027 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1029 if (port_numa[pid] != NUMA_NO_CONFIG)
1030 port_per_socket[port_numa[pid]]++;
1032 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1034 /* if socket_id is invalid, set to 0 */
1035 if (check_socket_id(socket_id) < 0)
1037 port_per_socket[socket_id]++;
1041 /* Apply Rx offloads configuration */
1042 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1043 port->rx_conf[k].offloads =
1044 port->dev_conf.rxmode.offloads;
1045 /* Apply Tx offloads configuration */
1046 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1047 port->tx_conf[k].offloads =
1048 port->dev_conf.txmode.offloads;
1050 /* set flag to initialize port/queue */
1051 port->need_reconfig = 1;
1052 port->need_reconfig_queues = 1;
1056 * Create pools of mbuf.
1057 * If NUMA support is disabled, create a single pool of mbuf in
1058 * socket 0 memory by default.
1059 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1061 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1062 * nb_txd can be configured at run time.
1064 if (param_total_num_mbufs)
1065 nb_mbuf_per_pool = param_total_num_mbufs;
1067 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1068 (nb_lcores * mb_mempool_cache) +
1069 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1070 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1076 for (i = 0; i < num_sockets; i++)
1077 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1080 if (socket_num == UMA_NO_CONFIG)
1081 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
1083 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1089 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1090 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1092 * Records which Mbuf pool to use by each logical core, if needed.
1094 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1095 mbp = mbuf_pool_find(
1096 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1099 mbp = mbuf_pool_find(0);
1100 fwd_lcores[lc_id]->mbp = mbp;
1101 /* initialize GSO context */
1102 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1103 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1104 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1105 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
1107 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1110 /* Configuration of packet forwarding streams. */
1111 if (init_fwd_streams() < 0)
1112 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1116 /* create a gro context for each lcore */
1117 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1118 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1119 gro_param.max_item_per_flow = MAX_PKT_BURST;
1120 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1121 gro_param.socket_id = rte_lcore_to_socket_id(
1122 fwd_lcores_cpuids[lc_id]);
1123 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1124 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1125 rte_exit(EXIT_FAILURE,
1126 "rte_gro_ctx_create() failed\n");
1130 #if defined RTE_LIBRTE_PMD_SOFTNIC
1131 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1132 RTE_ETH_FOREACH_DEV(pid) {
1134 const char *driver = port->dev_info.driver_name;
1136 if (strcmp(driver, "net_softnic") == 0)
1137 port->softport.fwd_lcore_arg = fwd_lcores;
/*
 * Re-read device info for the given port and flag it so the port and its
 * queues are (re)configured on the next start; records the NUMA socket the
 * port's resources should be allocated from.
 */
1146 reconfig(portid_t new_port_id, unsigned socket_id)
1148 	struct rte_port *port;
1150 	/* Reconfiguration of Ethernet ports. */
1151 	port = &ports[new_port_id];
1152 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
1154 	/* set flag to initialize port/queue */
1155 	port->need_reconfig = 1;
1156 	port->need_reconfig_queues = 1;
1157 	port->socket_id = socket_id;
1164 init_fwd_streams(void)
1167 struct rte_port *port;
1168 streamid_t sm_id, nb_fwd_streams_new;
1171 /* set socket id according to numa or not */
1172 RTE_ETH_FOREACH_DEV(pid) {
1174 if (nb_rxq > port->dev_info.max_rx_queues) {
1175 printf("Fail: nb_rxq(%d) is greater than "
1176 "max_rx_queues(%d)\n", nb_rxq,
1177 port->dev_info.max_rx_queues);
1180 if (nb_txq > port->dev_info.max_tx_queues) {
1181 printf("Fail: nb_txq(%d) is greater than "
1182 "max_tx_queues(%d)\n", nb_txq,
1183 port->dev_info.max_tx_queues);
1187 if (port_numa[pid] != NUMA_NO_CONFIG)
1188 port->socket_id = port_numa[pid];
1190 port->socket_id = rte_eth_dev_socket_id(pid);
1192 /* if socket_id is invalid, set to 0 */
1193 if (check_socket_id(port->socket_id) < 0)
1194 port->socket_id = 0;
1198 if (socket_num == UMA_NO_CONFIG)
1199 port->socket_id = 0;
1201 port->socket_id = socket_num;
1205 q = RTE_MAX(nb_rxq, nb_txq);
1207 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1210 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1211 if (nb_fwd_streams_new == nb_fwd_streams)
1214 if (fwd_streams != NULL) {
1215 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1216 if (fwd_streams[sm_id] == NULL)
1218 rte_free(fwd_streams[sm_id]);
1219 fwd_streams[sm_id] = NULL;
1221 rte_free(fwd_streams);
1226 nb_fwd_streams = nb_fwd_streams_new;
1227 if (nb_fwd_streams) {
1228 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1229 sizeof(struct fwd_stream *) * nb_fwd_streams,
1230 RTE_CACHE_LINE_SIZE);
1231 if (fwd_streams == NULL)
1232 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1233 " (struct fwd_stream *)) failed\n",
1236 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1237 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1238 " struct fwd_stream", sizeof(struct fwd_stream),
1239 RTE_CACHE_LINE_SIZE);
1240 if (fwd_streams[sm_id] == NULL)
1241 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1242 "(struct fwd_stream) failed\n");
1249 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1251 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1253 unsigned int total_burst;
1254 unsigned int nb_burst;
1255 unsigned int burst_stats[3];
1256 uint16_t pktnb_stats[3];
1258 int burst_percent[3];
1261 * First compute the total number of packet bursts and the
1262 * two highest numbers of bursts of the same number of packets.
1265 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1266 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1267 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1268 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1271 total_burst += nb_burst;
1272 if (nb_burst > burst_stats[0]) {
1273 burst_stats[1] = burst_stats[0];
1274 pktnb_stats[1] = pktnb_stats[0];
1275 burst_stats[0] = nb_burst;
1276 pktnb_stats[0] = nb_pkt;
1277 } else if (nb_burst > burst_stats[1]) {
1278 burst_stats[1] = nb_burst;
1279 pktnb_stats[1] = nb_pkt;
1282 if (total_burst == 0)
1284 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1285 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1286 burst_percent[0], (int) pktnb_stats[0]);
1287 if (burst_stats[0] == total_burst) {
1291 if (burst_stats[0] + burst_stats[1] == total_burst) {
1292 printf(" + %d%% of %d pkts]\n",
1293 100 - burst_percent[0], pktnb_stats[1]);
1296 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1297 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1298 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1299 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1302 printf(" + %d%% of %d pkts + %d%% of others]\n",
1303 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1305 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print the forwarding statistics accumulated for one port: RX/TX packet,
 * drop and error counters, plus per-stats-register queue counters when
 * queue-stats mapping is enabled.
 *
 * NOTE(review): this file appears corrupted — every line carries a stray
 * leading line number and many original lines (braces, else-branches,
 * format-string tails) are missing. Code left byte-identical; restore from
 * upstream DPDK testpmd before building.
 */
1308 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
1310 struct rte_port *port;
1313 static const char *fwd_stats_border = "----------------------";
1315 port = &ports[port_id];
1316 printf("\n  %s Forward statistics for port %-2d %s\n",
1317 fwd_stats_border, port_id, fwd_stats_border);
/* Wide layout when no queue-stats mapping is active on either direction. */
1319 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
1320 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1322 stats->ipackets, stats->imissed,
1323 (uint64_t) (stats->ipackets + stats->imissed));
/* Checksum-forwarding engine additionally tracks bad IP/L4 checksums. */
1325 if (cur_fwd_eng == &csum_fwd_engine)
1326 printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
1327 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1328 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1329 printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
1330 printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
1333 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1335 stats->opackets, port->tx_dropped,
1336 (uint64_t) (stats->opackets + port->tx_dropped));
/* Narrow (right-aligned) layout used when mapping is enabled — presumably
 * the elided else-branch of the check above; TODO confirm vs upstream. */
1339 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
1341 stats->ipackets, stats->imissed,
1342 (uint64_t) (stats->ipackets + stats->imissed));
1344 if (cur_fwd_eng == &csum_fwd_engine)
1345 printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
1346 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1347 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1348 printf("  RX-error:%"PRIu64"\n", stats->ierrors);
1349 printf("  RX-nombufs:             %14"PRIu64"\n",
1353 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1355 stats->opackets, port->tx_dropped,
1356 (uint64_t) (stats->opackets + port->tx_dropped));
/* Optional per-burst statistics for the streams attached to this port. */
1359 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1360 if (port->rx_stream)
1361 pkt_burst_stats_display("RX",
1362 &port->rx_stream->rx_burst_stats);
1363 if (port->tx_stream)
1364 pkt_burst_stats_display("TX",
1365 &port->tx_stream->tx_burst_stats);
/* Per-queue counters read from the device's stats registers. */
1368 if (port->rx_queue_stats_mapping_enabled) {
1370 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1371 printf("  Stats reg %2d RX-packets:%14"PRIu64
1372 "     RX-errors:%14"PRIu64
1373 "    RX-bytes:%14"PRIu64"\n",
1374 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1378 if (port->tx_queue_stats_mapping_enabled) {
1379 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1380 printf("  Stats reg %2d TX-packets:%14"PRIu64
1381 "                                 TX-bytes:%14"PRIu64"\n",
1382 i, stats->q_opackets[i], stats->q_obytes[i]);
1386 printf("  %s--------------------------------%s\n",
1387 fwd_stats_border, fwd_stats_border);
/*
 * Print per-stream forwarding statistics (RX/TX/dropped packet counts and,
 * for the csum engine, bad-checksum counters). Streams with no activity are
 * skipped (the early-return line is elided in this copy of the file).
 */
1391 fwd_stream_stats_display(streamid_t stream_id)
1393 struct fwd_stream *fs;
1394 static const char *fwd_top_stats_border = "-------";
1396 fs = fwd_streams[stream_id];
1397 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1398 (fs->fwd_dropped == 0))
1400 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1401 "TX Port=%2d/Queue=%2d %s\n",
1402 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1403 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1404 printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1405 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1407 /* if checksum mode */
1408 if (cur_fwd_eng == &csum_fwd_engine) {
1409 printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1410 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
/* Optional burst-size histograms for this stream. */
1413 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1414 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1415 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain any packets still sitting in the RX queues of all forwarding ports
 * before a new forwarding run, so stale packets do not pollute statistics.
 * Each queue is polled until empty or until a 1-second timeout expires; the
 * whole sweep is done twice with a 10 ms pause in between.
 */
1420 flush_fwd_rx_queues(void)
1422 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1429 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1430 uint64_t timer_period;
1432 /* convert to number of cycles */
1433 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1435 for (j = 0; j < 2; j++) {
1436 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1437 for (rxq = 0; rxq < nb_rxq; rxq++) {
1438 port_id = fwd_ports_ids[rxp];
1440 * testpmd can stuck in the below do while loop
1441 * if rte_eth_rx_burst() always returns nonzero
1442 * packets. So timer is added to exit this loop
1443 * after 1sec timer expiry.
1445 prev_tsc = rte_rdtsc();
1447 nb_rx = rte_eth_rx_burst(port_id, rxq,
1448 pkts_burst, MAX_PKT_BURST);
1449 for (i = 0; i < nb_rx; i++)
1450 rte_pktmbuf_free(pkts_burst[i]);
/* Accumulate elapsed cycles toward the 1 s drain timeout. */
1452 cur_tsc = rte_rdtsc();
1453 diff_tsc = cur_tsc - prev_tsc;
1454 timer_tsc += diff_tsc;
1455 } while ((nb_rx > 0) &&
1456 (timer_tsc < timer_period));
1460 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main forwarding loop executed on one lcore: repeatedly run the packet
 * forwarding callback on every stream assigned to this lcore until the
 * lcore's 'stopped' flag is set. Optionally drives the periodic bitrate
 * calculation and latency-stats update on their designated lcores.
 */
1465 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1467 struct fwd_stream **fsm;
1470 #ifdef RTE_LIBRTE_BITRATE
1471 uint64_t tics_per_1sec;
1472 uint64_t tics_datum;
1473 uint64_t tics_current;
1474 uint16_t i, cnt_ports;
1476 cnt_ports = nb_ports;
1477 tics_datum = rte_rdtsc();
1478 tics_per_1sec = rte_get_timer_hz();
1480 fsm = &fwd_streams[fc->stream_idx];
1481 nb_fs = fc->stream_nb;
/* Run the engine's forwarding callback over each stream of this lcore. */
1483 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1484 (*pkt_fwd)(fsm[sm_id]);
1485 #ifdef RTE_LIBRTE_BITRATE
/* Only the dedicated bitrate lcore performs the 1 Hz recalculation. */
1486 if (bitrate_enabled != 0 &&
1487 bitrate_lcore_id == rte_lcore_id()) {
1488 tics_current = rte_rdtsc();
1489 if (tics_current - tics_datum >= tics_per_1sec) {
1490 /* Periodic bitrate calculation */
1491 for (i = 0; i < cnt_ports; i++)
1492 rte_stats_bitrate_calc(bitrate_data,
1494 tics_datum = tics_current;
1498 #ifdef RTE_LIBRTE_LATENCY_STATS
1499 if (latencystats_enabled != 0 &&
1500 latencystats_lcore_id == rte_lcore_id())
1501 rte_latencystats_update();
/* Loop until stop_packet_forwarding() sets fc->stopped. */
1504 } while (! fc->stopped);
/*
 * lcore entry point: run the currently-configured forwarding engine's
 * packet_fwd callback on this lcore's streams (thin wrapper used with
 * rte_eal_remote_launch()).
 */
1508 start_pkt_forward_on_core(void *fwd_arg)
1510 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1511 cur_fwd_config.fwd_eng->packet_fwd);
1516  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1517  * Used to start communication flows in network loopback test configurations.
1520 run_one_txonly_burst_on_core(void *fwd_arg)
1522 struct fwd_lcore *fwd_lc;
1523 struct fwd_lcore tmp_lcore;
1525 fwd_lc = (struct fwd_lcore *) fwd_arg;
1526 tmp_lcore = *fwd_lc;
/* Pre-set stopped=1 on a local copy so the do/while forwarding loop in
 * run_pkt_fwd_on_lcore() executes exactly one iteration (one TX burst). */
1527 tmp_lcore.stopped = 1;
1528 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1533  * Launch packet forwarding:
1534  *     - Setup per-port forwarding context.
1535  *     - launch logical cores with their forwarding configuration.
1538 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1540 port_fwd_begin_t port_fwd_begin;
/* Give the engine a chance to initialize each forwarding port. */
1545 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1546 if (port_fwd_begin != NULL) {
1547 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1548 (*port_fwd_begin)(fwd_ports_ids[i]);
/* Launch the forwarding function on every configured lcore; in interactive
 * mode the master lcore stays on the command line and is skipped. */
1550 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1551 lc_id = fwd_lcores_cpuids[i];
1552 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1553 fwd_lcores[i]->stopped = 0;
1554 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1555 fwd_lcores[i], lc_id);
1557 printf("launch lcore %u failed - diag=%d\n",
1564  * Update the forward ports list: drop ports that became invalid, compact
1564b * the array, and optionally append new_pid (pass RTE_MAX_ETHPORTS to skip
1564c * the append). Keeps nb_fwd_ports/nb_cfg_ports in sync.
1567 update_fwd_ports(portid_t new_pid)
1570 unsigned int new_nb_fwd_ports = 0;
/* Compact fwd_ports_ids[] in place, keeping only still-valid port ids. */
1573 for (i = 0; i < nb_fwd_ports; ++i) {
1574 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1577 fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1581 if (new_pid < RTE_MAX_ETHPORTS)
1582 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1584 nb_fwd_ports = new_nb_fwd_ports;
1585 nb_cfg_ports = new_nb_fwd_ports;
1589  * Launch packet forwarding configuration.
1589b * Validates queue counts against the chosen engine, checks all ports are
1589c * started, snapshots per-port stats, zeroes per-stream counters, optionally
1589d * runs a tx_first warm-up burst, then launches the forwarding lcores.
1592 start_packet_forwarding(int with_tx_first)
1594 port_fwd_begin_t port_fwd_begin;
1595 port_fwd_end_t  port_fwd_end;
1596 struct rte_port *port;
/* rxonly needs RX queues, txonly needs TX queues, everything else both. */
1601 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1602 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1604 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1605 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1607 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1608 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1609 (!nb_rxq || !nb_txq))
1610 rte_exit(EXIT_FAILURE,
1611 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1612 cur_fwd_eng->fwd_mode_name);
1614 if (all_ports_started() == 0) {
1615 printf("Not all ports were started\n");
1618 if (test_done == 0) {
1619 printf("Packet forwarding already started\n");
/* DCB sanity checks — presumably gated on a dcb_config flag whose line is
 * elided in this copy; TODO confirm against upstream. */
1625 for (i = 0; i < nb_fwd_ports; i++) {
1626 pt_id = fwd_ports_ids[i];
1627 port = &ports[pt_id];
1628 if (!port->dcb_flag) {
1629 printf("In DCB mode, all forwarding ports must "
1630 "be configured in this mode.\n");
1634 if (nb_fwd_lcores == 1) {
1635 printf("In DCB mode,the nb forwarding cores "
1636 "should be larger than 1.\n");
/* Drop stale packets so they do not skew the new run's statistics. */
1645 flush_fwd_rx_queues();
1647 pkt_fwd_config_display(&cur_fwd_config);
1648 rxtx_config_display();
/* Snapshot current HW stats as the baseline for this run. */
1650 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1651 pt_id = fwd_ports_ids[i];
1652 port = &ports[pt_id];
1653 rte_eth_stats_get(pt_id, &port->stats);
1654 port->tx_dropped = 0;
1656 map_port_queue_stats_mapping_registers(pt_id, port);
/* Reset all per-stream software counters. */
1658 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1659 fwd_streams[sm_id]->rx_packets = 0;
1660 fwd_streams[sm_id]->tx_packets = 0;
1661 fwd_streams[sm_id]->fwd_dropped = 0;
1662 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1663 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1665 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1666 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1667 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1668 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1669 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1671 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1672 fwd_streams[sm_id]->core_cycles = 0;
/* tx_first: send warm-up burst(s) with the txonly engine before starting
 * the real forwarding engine (kick-starts loopback topologies). */
1675 if (with_tx_first) {
1676 port_fwd_begin = tx_only_engine.port_fwd_begin;
1677 if (port_fwd_begin != NULL) {
1678 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1679 (*port_fwd_begin)(fwd_ports_ids[i]);
1681 while (with_tx_first--) {
1682 launch_packet_forwarding(
1683 run_one_txonly_burst_on_core);
1684 rte_eal_mp_wait_lcore();
1686 port_fwd_end = tx_only_engine.port_fwd_end;
1687 if (port_fwd_end != NULL) {
1688 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1689 (*port_fwd_end)(fwd_ports_ids[i]);
1692 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop packet forwarding: signal all forwarding lcores to stop, wait for
 * them, run the engine's per-port teardown, fold per-stream counters into
 * per-port totals, then print per-port and accumulated statistics (deltas
 * against the baseline snapshot taken at start_packet_forwarding()).
 */
1696 stop_packet_forwarding(void)
1698 struct rte_eth_stats stats;
1699 struct rte_port *port;
1700 port_fwd_end_t  port_fwd_end;
1705 uint64_t total_recv;
1706 uint64_t total_xmit;
1707 uint64_t total_rx_dropped;
1708 uint64_t total_tx_dropped;
1709 uint64_t total_rx_nombuf;
1710 uint64_t tx_dropped;
1711 uint64_t rx_bad_ip_csum;
1712 uint64_t rx_bad_l4_csum;
1713 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1714 uint64_t fwd_cycles;
1717 static const char *acc_stats_border = "+++++++++++++++";
1720 printf("Packet forwarding not started\n");
/* Signal every forwarding lcore to leave its do/while loop, then wait. */
1723 printf("Telling cores to stop...");
1724 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1725 fwd_lcores[lc_id]->stopped = 1;
1726 printf("\nWaiting for lcores to finish...\n");
1727 rte_eal_mp_wait_lcore();
1728 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1729 if (port_fwd_end != NULL) {
1730 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1731 pt_id = fwd_ports_ids[i];
1732 (*port_fwd_end)(pt_id);
1735 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Fold each stream's counters into its RX/TX ports' accumulators. */
1738 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1739 if (cur_fwd_config.nb_fwd_streams >
1740 cur_fwd_config.nb_fwd_ports) {
1741 fwd_stream_stats_display(sm_id);
1742 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1743 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1745 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1747 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1750 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1751 tx_dropped = (uint64_t) (tx_dropped +
1752 fwd_streams[sm_id]->fwd_dropped);
1753 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1756 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1757 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1758 fwd_streams[sm_id]->rx_bad_ip_csum);
1759 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1763 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1764 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1765 fwd_streams[sm_id]->rx_bad_l4_csum);
1766 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1769 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1770 fwd_cycles = (uint64_t) (fwd_cycles +
1771 fwd_streams[sm_id]->core_cycles);
/* Compute per-port deltas relative to the start-of-run snapshot and print
 * them; also accumulate grand totals across all forwarding ports. */
1776 total_rx_dropped = 0;
1777 total_tx_dropped = 0;
1778 total_rx_nombuf  = 0;
1779 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1780 pt_id = fwd_ports_ids[i];
1782 port = &ports[pt_id];
1783 rte_eth_stats_get(pt_id, &stats);
1784 stats.ipackets -= port->stats.ipackets;
1785 port->stats.ipackets = 0;
1786 stats.opackets -= port->stats.opackets;
1787 port->stats.opackets = 0;
1788 stats.ibytes   -= port->stats.ibytes;
1789 port->stats.ibytes = 0;
1790 stats.obytes   -= port->stats.obytes;
1791 port->stats.obytes = 0;
1792 stats.imissed  -= port->stats.imissed;
1793 port->stats.imissed = 0;
1794 stats.oerrors  -= port->stats.oerrors;
1795 port->stats.oerrors = 0;
1796 stats.rx_nombuf -= port->stats.rx_nombuf;
1797 port->stats.rx_nombuf = 0;
1799 total_recv += stats.ipackets;
1800 total_xmit += stats.opackets;
1801 total_rx_dropped += stats.imissed;
1802 total_tx_dropped += port->tx_dropped;
1803 total_rx_nombuf  += stats.rx_nombuf;
1805 fwd_port_stats_display(pt_id, &stats);
1808 printf("\n  %s Accumulated forward statistics for all ports"
1810 acc_stats_border, acc_stats_border);
1811 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1813 "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1815 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1816 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1817 if (total_rx_nombuf > 0)
1818 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1819 printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1821 acc_stats_border, acc_stats_border);
1822 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* NOTE(review): divides by total_recv — the guard against total_recv==0
 * is presumably on an elided line; verify before building. */
1824 printf("\n  CPU cycles/packet=%u (total cycles="
1825 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1826 (unsigned int)(fwd_cycles / total_recv),
1827 fwd_cycles, total_recv);
1829 printf("\nDone.\n");
/* Bring the link of port 'pid' up; print a message on failure. */
1834 dev_set_link_up(portid_t pid)
1836 if (rte_eth_dev_set_link_up(pid) < 0)
1837 printf("\nSet link up fail.\n");
/* Take the link of port 'pid' down; print a message on failure. */
1841 dev_set_link_down(portid_t pid)
1843 if (rte_eth_dev_set_link_down(pid) < 0)
1844 printf("\nSet link down fail.\n");
/*
 * Return whether every non-slave ethdev port is in RTE_PORT_STARTED state
 * (bonding slaves are exempt from the check).
 */
1848 all_ports_started(void)
1851 struct rte_port *port;
1853 RTE_ETH_FOREACH_DEV(pi) {
1855 /* Check if there is a port which is not started */
1856 if ((port->port_status != RTE_PORT_STARTED) &&
1857 (port->slave_flag == 0))
1861 /* No port is not started */
/* Return whether the given port is stopped (bonding slaves exempt). */
1866 port_is_stopped(portid_t port_id)
1868 struct rte_port *port = &ports[port_id];
1870 if ((port->port_status != RTE_PORT_STOPPED) &&
1871 (port->slave_flag == 0))
/* Return whether every ethdev port reports as stopped. */
1877 all_ports_stopped(void)
1881 RTE_ETH_FOREACH_DEV(pi) {
1882 if (!port_is_stopped(pi))
/* Return whether the (valid) port is in RTE_PORT_STARTED state. */
1890 port_is_started(portid_t port_id)
1892 if (port_id_is_invalid(port_id, ENABLED_WARN))
1895 if (ports[port_id].port_status != RTE_PORT_STARTED)
/* Return whether the (valid) port is in RTE_PORT_CLOSED state. */
1902 port_is_closed(portid_t port_id)
1904 if (port_id_is_invalid(port_id, ENABLED_WARN))
1907 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * Start one port (or all ports when pid == RTE_PORT_ALL): reconfigure the
 * device and its RX/TX queues if flagged, start the device, transition the
 * port state STOPPED -> HANDLING -> STARTED via atomic compare-and-set,
 * register ethdev event callbacks, and finally check link status.
 */
1914 start_port(portid_t pid)
1916 int diag, need_check_link_status = -1;
1919 struct rte_port *port;
1920 struct ether_addr mac_addr;
1921 enum rte_eth_event_type event_type;
1923 if (port_id_is_invalid(pid, ENABLED_WARN))
1928 RTE_ETH_FOREACH_DEV(pi) {
1929 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1932 need_check_link_status = 0;
/* Take exclusive ownership of the port's state machine. */
1934 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1935 RTE_PORT_HANDLING) == 0) {
1936 printf("Port %d is now not stopped\n", pi);
1940 if (port->need_reconfig > 0) {
1941 port->need_reconfig = 0;
1943 if (flow_isolate_all) {
1944 int ret = port_flow_isolate(pi, 1);
1946 printf("Failed to apply isolated"
1947 " mode on port %d\n", pi);
1952 printf("Configuring Port %d (socket %u)\n", pi,
1954 /* configure port */
1955 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* On configure failure: roll state back to STOPPED and retry next time. */
1958 if (rte_atomic16_cmpset(&(port->port_status),
1959 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1960 printf("Port %d can not be set back "
1961 "to stopped\n", pi);
1962 printf("Fail to configure port %d\n", pi);
1963 /* try to reconfigure port next time */
1964 port->need_reconfig = 1;
1968 if (port->need_reconfig_queues > 0) {
1969 port->need_reconfig_queues = 0;
1970 /* setup tx queues */
1971 for (qi = 0; qi < nb_txq; qi++) {
/* Honor a per-port TX-ring NUMA override when configured. */
1972 if ((numa_support) &&
1973 (txring_numa[pi] != NUMA_NO_CONFIG))
1974 diag = rte_eth_tx_queue_setup(pi, qi,
1975 port->nb_tx_desc[qi],
1977 &(port->tx_conf[qi]));
1979 diag = rte_eth_tx_queue_setup(pi, qi,
1980 port->nb_tx_desc[qi],
1982 &(port->tx_conf[qi]));
1987 /* Fail to setup tx queue, return */
1988 if (rte_atomic16_cmpset(&(port->port_status),
1990 RTE_PORT_STOPPED) == 0)
1991 printf("Port %d can not be set back "
1992 "to stopped\n", pi);
1993 printf("Fail to configure port %d tx queues\n",
1995 /* try to reconfigure queues next time */
1996 port->need_reconfig_queues = 1;
1999 for (qi = 0; qi < nb_rxq; qi++) {
2000 /* setup rx queues */
2001 if ((numa_support) &&
2002 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2003 struct rte_mempool * mp =
2004 mbuf_pool_find(rxring_numa[pi]);
2006 printf("Failed to setup RX queue:"
2007 "No mempool allocation"
2008 " on the socket %d\n",
2013 diag = rte_eth_rx_queue_setup(pi, qi,
2014 port->nb_rx_desc[qi],
2016 &(port->rx_conf[qi]),
/* No NUMA override: use the mempool on the port's own socket. */
2019 struct rte_mempool *mp =
2020 mbuf_pool_find(port->socket_id);
2022 printf("Failed to setup RX queue:"
2023 "No mempool allocation"
2024 " on the socket %d\n",
2028 diag = rte_eth_rx_queue_setup(pi, qi,
2029 port->nb_rx_desc[qi],
2031 &(port->rx_conf[qi]),
2037 /* Fail to setup rx queue, return */
2038 if (rte_atomic16_cmpset(&(port->port_status),
2040 RTE_PORT_STOPPED) == 0)
2041 printf("Port %d can not be set back "
2042 "to stopped\n", pi);
2043 printf("Fail to configure port %d rx queues\n",
2045 /* try to reconfigure queues next time */
2046 port->need_reconfig_queues = 1;
/* Start the device itself; roll state back on failure. */
2052 if (rte_eth_dev_start(pi) < 0) {
2053 printf("Fail to start port %d\n", pi);
2055 /* Fail to setup rx queue, return */
2056 if (rte_atomic16_cmpset(&(port->port_status),
2057 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2058 printf("Port %d can not be set back to "
2063 if (rte_atomic16_cmpset(&(port->port_status),
2064 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2065 printf("Port %d can not be set into started\n", pi);
2067 rte_eth_macaddr_get(pi, &mac_addr);
2068 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2069 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2070 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2071 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2073 /* at least one port started, need checking link status */
2074 need_check_link_status = 1;
/* Register eth_event_callback for every ethdev event type on all ports. */
2077 for (event_type = RTE_ETH_EVENT_UNKNOWN;
2078 event_type < RTE_ETH_EVENT_MAX;
2080 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
2085 printf("Failed to setup even callback for event %d\n",
2091 if (need_check_link_status == 1 && !no_link_check)
2092 check_all_ports_link_status(RTE_PORT_ALL);
2093 else if (need_check_link_status == 0)
2094 printf("Please stop the ports first\n");
/*
 * Stop one port (or all when pid == RTE_PORT_ALL). Refuses ports that are
 * still in the forwarding configuration or are bonding slaves; transitions
 * STARTED -> HANDLING -> STOPPED around rte_eth_dev_stop().
 */
2101 stop_port(portid_t pid)
2104 struct rte_port *port;
2105 int need_check_link_status = 0;
2112 if (port_id_is_invalid(pid, ENABLED_WARN))
2115 printf("Stopping ports...\n");
2117 RTE_ETH_FOREACH_DEV(pi) {
2118 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2121 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2122 printf("Please remove port %d from forwarding configuration.\n", pi);
2126 if (port_is_bonding_slave(pi)) {
2127 printf("Please remove port %d from bonded device.\n", pi);
2132 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2133 RTE_PORT_HANDLING) == 0)
2136 rte_eth_dev_stop(pi);
2138 if (rte_atomic16_cmpset(&(port->port_status),
2139 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2140 printf("Port %d can not be set into stopped\n", pi);
2141 need_check_link_status = 1;
2143 if (need_check_link_status && !no_link_check)
2144 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Close one port (or all when pid == RTE_PORT_ALL): flush its flow rules,
 * call rte_eth_dev_close(), and move the port state to RTE_PORT_CLOSED.
 * Ports still forwarding or acting as bonding slaves are refused.
 */
2150 close_port(portid_t pid)
2153 struct rte_port *port;
2155 if (port_id_is_invalid(pid, ENABLED_WARN))
2158 printf("Closing ports...\n");
2160 RTE_ETH_FOREACH_DEV(pi) {
2161 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2164 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2165 printf("Please remove port %d from forwarding configuration.\n", pi);
2169 if (port_is_bonding_slave(pi)) {
2170 printf("Please remove port %d from bonded device.\n", pi);
/* CLOSED->CLOSED cmpset succeeding means the port was already closed. */
2175 if (rte_atomic16_cmpset(&(port->port_status),
2176 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2177 printf("Port %d is already closed\n", pi);
2181 if (rte_atomic16_cmpset(&(port->port_status),
2182 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2183 printf("Port %d is now not stopped\n", pi);
2187 if (port->flow_list)
2188 port_flow_flush(pi);
2189 rte_eth_dev_close(pi);
2191 if (rte_atomic16_cmpset(&(port->port_status),
2192 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2193 printf("Port %d cannot be set to closed\n", pi);
/*
 * Reset one port (or all when pid == RTE_PORT_ALL) via rte_eth_dev_reset()
 * and flag it for full reconfiguration (device + queues) on next start.
 * Ports still forwarding or acting as bonding slaves are refused.
 */
2200 reset_port(portid_t pid)
2204 struct rte_port *port;
2206 if (port_id_is_invalid(pid, ENABLED_WARN))
2209 printf("Resetting ports...\n");
2211 RTE_ETH_FOREACH_DEV(pi) {
2212 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2215 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2216 printf("Please remove port %d from forwarding "
2217 "configuration.\n", pi);
2221 if (port_is_bonding_slave(pi)) {
2222 printf("Please remove port %d from bonded device.\n",
2227 diag = rte_eth_dev_reset(pi);
2230 port->need_reconfig = 1;
2231 port->need_reconfig_queues = 1;
2233 printf("Failed to reset port %d. diag=%d\n", pi, diag);
/* Register eth_dev_event_callback for hotplug events on all devices
 * (NULL device name = match any); print a message on failure. */
2241 eth_dev_event_callback_register(void)
2245 /* register the device event callback */
2246 ret = rte_dev_event_callback_register(NULL,
2247 eth_dev_event_callback, NULL);
2249 printf("Failed to register device event callback\n");
/* Unregister the device event callback installed by
 * eth_dev_event_callback_register(); print a message on failure. */
2258 eth_dev_event_callback_unregister(void)
2262 /* unregister the device event callback */
2263 ret = rte_dev_event_callback_unregister(NULL,
2264 eth_dev_event_callback, NULL);
2266 printf("Failed to unregister device event callback\n");
/*
 * Hot-plug attach a new port from a device identifier string: attach the
 * device, reconfigure it on its NUMA socket, enable promiscuous mode, and
 * add it to the ports/forwarding lists.
 */
2274 attach_port(char *identifier)
2277 unsigned int socket_id;
2279 printf("Attaching a new port...\n");
2281 if (identifier == NULL) {
2282 printf("Invalid parameters are specified\n");
2286 if (rte_eth_dev_attach(identifier, &pi))
2289 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2290 /* if socket_id is invalid, set to 0 */
2291 if (check_socket_id(socket_id) < 0)
2293 reconfig(pi, socket_id);
2294 rte_eth_promiscuous_enable(pi);
2296 ports_ids[nb_ports] = pi;
2297 nb_ports = rte_eth_dev_count_avail();
2299 ports[pi].port_status = RTE_PORT_STOPPED;
2301 update_fwd_ports(pi);
2303 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Hot-plug detach a (previously closed) port: flush its flow rules, detach
 * the underlying device, and compact ports_ids[] / the forwarding list.
 */
2308 detach_port(portid_t port_id)
2310 char name[RTE_ETH_NAME_MAX_LEN];
2313 printf("Detaching a port...\n");
2315 if (!port_is_closed(port_id)) {
2316 printf("Please close port first\n");
2320 if (ports[port_id].flow_list)
2321 port_flow_flush(port_id);
2323 if (rte_eth_dev_detach(port_id, name)) {
2324 TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
/* Remove the id from ports_ids[] by swapping in the last entry. */
2328 for (i = 0; i < nb_ports; i++) {
2329 if (ports_ids[i] == port_id) {
2330 ports_ids[i] = ports_ids[nb_ports-1];
2331 ports_ids[nb_ports-1] = 0;
2335 nb_ports = rte_eth_dev_count_avail();
2337 update_fwd_ports(RTE_MAX_ETHPORTS);
2339 printf("Port %u is detached. Now total ports is %d\n",
/*
 * NOTE(review): the function header for this body is missing from this copy
 * of the file — presumably pmd_test_exit(): stop forwarding, shut down all
 * ports, stop device-event monitoring, and unregister callbacks on exit.
 * Confirm against upstream before building.
 */
struct rte_device *device;
2353 stop_packet_forwarding();
2355 if (ports != NULL) {
2357 RTE_ETH_FOREACH_DEV(pt_id) {
2358 printf("\nShutting down port %d...\n", pt_id);
2364  * This is a workaround to fix a virtio-user issue that
2365  * requires to call clean-up routine to remove existing
2367  * This workaround valid only for testpmd, needs a fix
2368  * valid for all applications.
2369  * TODO: Implement proper resource cleanup
2371 device = rte_eth_devices[pt_id].device;
2372 if (device && !strcmp(device->driver->name, "net_virtio_user"))
2378 ret = rte_dev_event_monitor_stop();
2381 "fail to stop device event monitor.");
2383 ret = eth_dev_event_callback_unregister();
2386 "fail to unregister all event callbacks.");
2389 printf("\nBye...\n");
/* A named test command: menu name plus the function that implements it. */
2392 typedef void (*cmd_func_t)(void);
2393 struct pmd_test_command {
2394 const char *cmd_name;
2395 cmd_func_t cmd_func;
/* Number of entries in the pmd_test_menu table (defined elsewhere). */
2398 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2400 /* Check the link status of all ports in up to 9s, and print them finally */
2402 check_all_ports_link_status(uint32_t port_mask)
2404 #define CHECK_INTERVAL 100 /* 100ms */
2405 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2407 uint8_t count, all_ports_up, print_flag = 0;
2408 struct rte_eth_link link;
2410 printf("Checking link statuses...\n");
2412 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2414 RTE_ETH_FOREACH_DEV(portid) {
2415 if ((port_mask & (1 << portid)) == 0)
2417 memset(&link, 0, sizeof(link));
2418 rte_eth_link_get_nowait(portid, &link);
2419 /* print link status if flag set */
2420 if (print_flag == 1) {
2421 if (link.link_status)
2423 "Port%d Link Up. speed %u Mbps- %s\n",
2424 portid, link.link_speed,
2425 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2426 ("full-duplex") : ("half-duplex\n"));
2428 printf("Port %d Link Down\n", portid);
2431 /* clear all_ports_up flag if any link down */
2432 if (link.link_status == ETH_LINK_DOWN) {
2437 /* after finally printing all link status, get out */
2438 if (print_flag == 1)
/* Some link still down: sleep 100 ms and poll again. */
2441 if (all_ports_up == 0) {
2443 rte_delay_ms(CHECK_INTERVAL);
2446 /* set the print_flag if all ports up or timeout */
2447 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred handler (run from an EAL alarm) for a device-removal interrupt:
 * stop forwarding if the removed port was forwarding, close and detach the
 * port with link checking suppressed, then restart forwarding if needed.
 */
2457 rmv_event_callback(void *arg)
2459 int need_to_start = 0;
2460 int org_no_link_check = no_link_check;
2461 portid_t port_id = (intptr_t)arg;
2463 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2465 if (!test_done && port_is_forwarding(port_id)) {
2467 stop_packet_forwarding();
/* no_link_check restored here — presumably set to 1 around close on an
 * elided line; TODO confirm vs upstream. */
2471 no_link_check = org_no_link_check;
2472 close_port(port_id);
2473 detach_port(port_id);
2475 start_packet_forwarding(0);
2478 /* This function is used by the interrupt thread */
2480 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
/* Human-readable names for each ethdev event, indexed by event type. */
2483 static const char * const event_desc[] = {
2484 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2485 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2486 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2487 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2488 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2489 [RTE_ETH_EVENT_IPSEC] = "IPsec",
2490 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2491 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2492 [RTE_ETH_EVENT_NEW] = "device probed",
2493 [RTE_ETH_EVENT_DESTROY] = "device released",
2494 [RTE_ETH_EVENT_MAX] = NULL,
2497 RTE_SET_USED(param);
2498 RTE_SET_USED(ret_param);
2500 if (type >= RTE_ETH_EVENT_MAX) {
2501 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
2502 port_id, __func__, type);
/* Print the event only if its bit is set in event_print_mask. */
2504 } else if (event_print_mask & (UINT32_C(1) << type)) {
2505 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2510 if (port_id_is_invalid(port_id, DISABLED_WARN))
2514 case RTE_ETH_EVENT_INTR_RMV:
/* Defer removal handling 100 ms to get off the interrupt thread. */
2515 if (rte_eal_alarm_set(100000,
2516 rmv_event_callback, (void *)(intptr_t)port_id))
2517 fprintf(stderr, "Could not set up deferred device removal\n");
2525 /* This function is used by the interrupt thread */
2527 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2528 __rte_unused void *arg)
2530 if (type >= RTE_DEV_EVENT_MAX) {
2531 fprintf(stderr, "%s called upon invalid event %d\n",
/* Hotplug events are currently only logged; handling is still TODO. */
2537 case RTE_DEV_EVENT_REMOVE:
2538 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2540 /* TODO: After finish failure handle, begin to stop
2541  * packet forward, stop port, close port, detach port.
2544 case RTE_DEV_EVENT_ADD:
2545 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2547 /* TODO: After finish kernel driver binding,
2548  * begin to attach port.
/*
 * Program the device's TX queue -> stats-counter mappings configured for
 * this port; marks the port's tx mapping as enabled when at least one
 * mapping was applied (the diag/mapping_found handling lines are elided
 * in this copy of the file).
 */
2557 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2561 uint8_t mapping_found = 0;
2563 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2564 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2565 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2566 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2567 tx_queue_stats_mappings[i].queue_id,
2568 tx_queue_stats_mappings[i].stats_counter_id);
2575 port->tx_queue_stats_mapping_enabled = 1;
/*
 * Program the device's RX queue -> stats-counter mappings configured for
 * this port; marks the port's rx mapping as enabled when at least one
 * mapping was applied (mirror of the TX variant above).
 */
2580 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2584 uint8_t mapping_found = 0;
2586 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2587 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2588 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2589 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2590 rx_queue_stats_mappings[i].queue_id,
2591 rx_queue_stats_mappings[i].stats_counter_id);
2598 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue-stats register mappings for a port.
 * -ENOTSUP from the PMD is tolerated (mapping simply disabled); any other
 * error is fatal and aborts testpmd.
 */
2603 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2607 diag = set_tx_queue_stats_mapping_registers(pi, port);
2609 if (diag == -ENOTSUP) {
2610 port->tx_queue_stats_mapping_enabled = 0;
2611 printf("TX queue stats mapping not supported port id=%d\n", pi);
2614 rte_exit(EXIT_FAILURE,
2615 "set_tx_queue_stats_mapping_registers "
2616 "failed for port id=%d diag=%d\n",
2620 diag = set_rx_queue_stats_mapping_registers(pi, port);
2622 if (diag == -ENOTSUP) {
2623 port->rx_queue_stats_mapping_enabled = 0;
2624 printf("RX queue stats mapping not supported port id=%d\n", pi);
2627 rte_exit(EXIT_FAILURE,
2628 "set_rx_queue_stats_mapping_registers "
2629 "failed for port id=%d diag=%d\n",
/*
 * Initialize a port's per-queue RX/TX configuration from the PMD's
 * defaults, then overlay any threshold/descriptor values supplied on the
 * command line (RTE_PMD_PARAM_UNSET means "keep the PMD default").
 */
2635 rxtx_port_config(struct rte_port *port)
2639 for (qid = 0; qid < nb_rxq; qid++) {
2640 port->rx_conf[qid] = port->dev_info.default_rxconf;
2642 /* Check if any Rx parameters have been passed */
2643 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2644 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2646 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2647 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2649 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2650 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2652 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2653 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2655 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2656 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2658 port->nb_rx_desc[qid] = nb_rxd;
2661 for (qid = 0; qid < nb_txq; qid++) {
2662 port->tx_conf[qid] = port->dev_info.default_txconf;
2664 /* Check if any Tx parameters have been passed */
2665 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2666 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2668 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2669 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2671 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2672 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2674 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2675 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2677 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2678 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2680 port->nb_tx_desc[qid] = nb_txd;
/*
 * Initialize the configuration of every ethdev port: FDIR config, device
 * info, RSS hash functions (masked by PMD capability), RX multi-queue mode,
 * per-queue RX/TX config, stats-register mappings, and LSC/RMV interrupt
 * enables when the device supports them.
 */
2685 init_port_config(void)
2688 struct rte_port *port;
2690 RTE_ETH_FOREACH_DEV(pid) {
2692 port->dev_conf.fdir_conf = fdir_conf;
2693 rte_eth_dev_info_get(pid, &port->dev_info);
/* Enable only the RSS hash functions the PMD actually supports;
 * the branch condition line is elided in this copy (presumably
 * gated on nb_rxq > 1 — TODO confirm vs upstream). */
2695 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2696 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2697 rss_hf & port->dev_info.flow_type_rss_offloads;
2699 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2700 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2703 if (port->dcb_flag == 0) {
2704 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2705 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2707 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2710 rxtx_port_config(port);
2712 rte_eth_macaddr_get(pid, &port->eth_addr);
2714 map_port_queue_stats_mapping_registers(pid, port);
2715 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2716 rte_pmd_ixgbe_bypass_init(pid);
/* Enable link-state-change / removal interrupts if requested and the
 * device advertises support for them. */
2719 if (lsc_interrupt &&
2720 (rte_eth_devices[pid].data->dev_flags &
2721 RTE_ETH_DEV_INTR_LSC))
2722 port->dev_conf.intr_conf.lsc = 1;
2723 if (rmv_interrupt &&
2724 (rte_eth_devices[pid].data->dev_flags &
2725 RTE_ETH_DEV_INTR_RMV))
2726 port->dev_conf.intr_conf.rmv = 1;
/* Mark a port as a bonding slave (exempts it from start/stop checks). */
2730 void set_port_slave_flag(portid_t slave_pid)
2732 struct rte_port *port;
2734 port = &ports[slave_pid];
2735 port->slave_flag = 1;
/* Clear a port's bonding-slave mark. */
2738 void clear_port_slave_flag(portid_t slave_pid)
2740 struct rte_port *port;
2742 port = &ports[slave_pid];
2743 port->slave_flag = 0;
/* Return whether the port is a bonding slave, either per the ethdev
 * BONDED_SLAVE device flag or testpmd's own slave_flag mark. */
2746 uint8_t port_is_bonding_slave(portid_t slave_pid)
2748 struct rte_port *port;
2750 port = &ports[slave_pid];
2751 if ((rte_eth_devices[slave_pid].data->dev_flags &
2752 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
/* VLAN ids used to populate the VMDQ+DCB pool map in get_eth_dcb_conf(). */
2757 const uint16_t vlan_tags[] = {
2758 0,  1,  2,  3,  4,  5,  6,  7,
2759 8,  9, 10, 11, 12, 13, 14, 15,
2760 16, 17, 18, 19, 20, 21, 22, 23,
2761 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Build the rte_eth_conf for DCB operation on a port, either combined with
 * virtualization (VMDQ+DCB pools keyed by VLAN tag) or plain DCB with RSS.
 * Also sets the PFC/PG capability bits (pfc_en handling partly elided in
 * this copy of the file).
 */
2765 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2766 enum dcb_mode_enable dcb_mode,
2767 enum rte_eth_nb_tcs num_tcs,
2772 struct rte_eth_rss_conf rss_conf;
2775  * Builds up the correct configuration for dcb+vt based on the vlan tags array
2776  * given above, and the number of traffic classes available for use.
2778 if (dcb_mode == DCB_VT_ENABLED) {
2779 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2780 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2781 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2782 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2784 /* VMDQ+DCB RX and TX configurations */
2785 vmdq_rx_conf->enable_default_pool = 0;
2786 vmdq_rx_conf->default_pool = 0;
/* 4 TCs => 32 pools, 8 TCs => 16 pools. */
2787 vmdq_rx_conf->nb_queue_pools =
2788 (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2789 vmdq_tx_conf->nb_queue_pools =
2790 (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* Map each VLAN tag round-robin onto the pools. */
2792 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2793 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2794 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2795 vmdq_rx_conf->pool_map[i].pools =
2796 1 << (i % vmdq_rx_conf->nb_queue_pools);
2798 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2799 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2800 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2803 /* set DCB mode of RX and TX of multiple queues */
2804 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2805 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
/* Plain DCB: keep the current RSS hash config and spread user
 * priorities round-robin across the traffic classes. */
2807 struct rte_eth_dcb_rx_conf *rx_conf =
2808 &eth_conf->rx_adv_conf.dcb_rx_conf;
2809 struct rte_eth_dcb_tx_conf *tx_conf =
2810 &eth_conf->tx_adv_conf.dcb_tx_conf;
2812 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2816 rx_conf->nb_tcs = num_tcs;
2817 tx_conf->nb_tcs = num_tcs;
2819 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2820 rx_conf->dcb_tc[i] = i % num_tcs;
2821 tx_conf->dcb_tc[i] = i % num_tcs;
2824 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2825 eth_conf->rx_adv_conf.rss_conf = rss_conf;
2826 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
/* Advertise priority-flow-control only when pfc is enabled. */
2830 eth_conf->dcb_capability_en =
2831 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2833 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2839 init_port_dcb_config(portid_t pid,
2840 enum dcb_mode_enable dcb_mode,
2841 enum rte_eth_nb_tcs num_tcs,
2844 struct rte_eth_conf port_conf;
2845 struct rte_port *rte_port;
2849 rte_port = &ports[pid];
2851 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2852 /* Enter DCB configuration status */
2855 port_conf.rxmode = rte_port->dev_conf.rxmode;
2856 port_conf.txmode = rte_port->dev_conf.txmode;
2858 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2859 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2862 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2864 /* re-configure the device . */
2865 rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
2867 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2869 /* If dev_info.vmdq_pool_base is greater than 0,
2870 * the queue id of vmdq pools is started after pf queues.
2872 if (dcb_mode == DCB_VT_ENABLED &&
2873 rte_port->dev_info.vmdq_pool_base > 0) {
2874 printf("VMDQ_DCB multi-queue mode is nonsensical"
2875 " for port %d.", pid);
2879 /* Assume the ports in testpmd have the same dcb capability
2880 * and has the same number of rxq and txq in dcb mode
2882 if (dcb_mode == DCB_VT_ENABLED) {
2883 if (rte_port->dev_info.max_vfs > 0) {
2884 nb_rxq = rte_port->dev_info.nb_rx_queues;
2885 nb_txq = rte_port->dev_info.nb_tx_queues;
2887 nb_rxq = rte_port->dev_info.max_rx_queues;
2888 nb_txq = rte_port->dev_info.max_tx_queues;
2891 /*if vt is disabled, use all pf queues */
2892 if (rte_port->dev_info.vmdq_pool_base == 0) {
2893 nb_rxq = rte_port->dev_info.max_rx_queues;
2894 nb_txq = rte_port->dev_info.max_tx_queues;
2896 nb_rxq = (queueid_t)num_tcs;
2897 nb_txq = (queueid_t)num_tcs;
2901 rx_free_thresh = 64;
2903 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2905 rxtx_port_config(rte_port);
2907 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2908 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2909 rx_vft_set(pid, vlan_tags[i], 1);
2911 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2912 map_port_queue_stats_mapping_registers(pid, rte_port);
2914 rte_port->dcb_flag = 1;
2922 /* Configuration of Ethernet ports. */
2923 ports = rte_zmalloc("testpmd: ports",
2924 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2925 RTE_CACHE_LINE_SIZE);
2926 if (ports == NULL) {
2927 rte_exit(EXIT_FAILURE,
2928 "rte_zmalloc(%d struct rte_port) failed\n",
2944 const char clr[] = { 27, '[', '2', 'J', '\0' };
2945 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2947 /* Clear screen and move to top left */
2948 printf("%s%s", clr, top_left);
2950 printf("\nPort statistics ====================================");
2951 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2952 nic_stats_display(fwd_ports_ids[i]);
2956 signal_handler(int signum)
2958 if (signum == SIGINT || signum == SIGTERM) {
2959 printf("\nSignal %d received, preparing to exit...\n",
2961 #ifdef RTE_LIBRTE_PDUMP
2962 /* uninitialize packet capture framework */
2965 #ifdef RTE_LIBRTE_LATENCY_STATS
2966 rte_latencystats_uninit();
2969 /* Set flag to indicate the force termination. */
2971 /* exit with the expected status */
2972 signal(signum, SIG_DFL);
2973 kill(getpid(), signum);
2978 main(int argc, char** argv)
2985 signal(SIGINT, signal_handler);
2986 signal(SIGTERM, signal_handler);
2988 diag = rte_eal_init(argc, argv);
2990 rte_panic("Cannot init EAL\n");
2992 testpmd_logtype = rte_log_register("testpmd");
2993 if (testpmd_logtype < 0)
2994 rte_panic("Cannot register log type");
2995 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2997 #ifdef RTE_LIBRTE_PDUMP
2998 /* initialize packet capture framework */
2999 rte_pdump_init(NULL);
3003 RTE_ETH_FOREACH_DEV(port_id) {
3004 ports_ids[count] = port_id;
3007 nb_ports = (portid_t) count;
3009 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3011 /* allocate port structures, and init them */
3014 set_def_fwd_config();
3016 rte_panic("Empty set of forwarding logical cores - check the "
3017 "core mask supplied in the command parameters\n");
3019 /* Bitrate/latency stats disabled by default */
3020 #ifdef RTE_LIBRTE_BITRATE
3021 bitrate_enabled = 0;
3023 #ifdef RTE_LIBRTE_LATENCY_STATS
3024 latencystats_enabled = 0;
3027 /* on FreeBSD, mlockall() is disabled by default */
3028 #ifdef RTE_EXEC_ENV_BSDAPP
3037 launch_args_parse(argc, argv);
3039 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3040 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3044 if (tx_first && interactive)
3045 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3046 "interactive mode.\n");
3048 if (tx_first && lsc_interrupt) {
3049 printf("Warning: lsc_interrupt needs to be off when "
3050 " using tx_first. Disabling.\n");
3054 if (!nb_rxq && !nb_txq)
3055 printf("Warning: Either rx or tx queues should be non-zero\n");
3057 if (nb_rxq > 1 && nb_rxq > nb_txq)
3058 printf("Warning: nb_rxq=%d enables RSS configuration, "
3059 "but nb_txq=%d will prevent to fully test it.\n",
3065 /* enable hot plug monitoring */
3066 ret = rte_dev_event_monitor_start();
3071 eth_dev_event_callback_register();
3075 if (start_port(RTE_PORT_ALL) != 0)
3076 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3078 /* set all ports to promiscuous mode by default */
3079 RTE_ETH_FOREACH_DEV(port_id)
3080 rte_eth_promiscuous_enable(port_id);
3082 /* Init metrics library */
3083 rte_metrics_init(rte_socket_id());
3085 #ifdef RTE_LIBRTE_LATENCY_STATS
3086 if (latencystats_enabled != 0) {
3087 int ret = rte_latencystats_init(1, NULL);
3089 printf("Warning: latencystats init()"
3090 " returned error %d\n", ret);
3091 printf("Latencystats running on lcore %d\n",
3092 latencystats_lcore_id);
3096 /* Setup bitrate stats */
3097 #ifdef RTE_LIBRTE_BITRATE
3098 if (bitrate_enabled != 0) {
3099 bitrate_data = rte_stats_bitrate_create();
3100 if (bitrate_data == NULL)
3101 rte_exit(EXIT_FAILURE,
3102 "Could not allocate bitrate data.\n");
3103 rte_stats_bitrate_reg(bitrate_data);
3107 #ifdef RTE_LIBRTE_CMDLINE
3108 if (strlen(cmdline_filename) != 0)
3109 cmdline_read_from_file(cmdline_filename);
3111 if (interactive == 1) {
3113 printf("Start automatic packet forwarding\n");
3114 start_packet_forwarding(0);
3126 printf("No commandline core given, start packet forwarding\n");
3127 start_packet_forwarding(tx_first);
3128 if (stats_period != 0) {
3129 uint64_t prev_time = 0, cur_time, diff_time = 0;
3130 uint64_t timer_period;
3132 /* Convert to number of cycles */
3133 timer_period = stats_period * rte_get_timer_hz();
3135 while (f_quit == 0) {
3136 cur_time = rte_get_timer_cycles();
3137 diff_time += cur_time - prev_time;
3139 if (diff_time >= timer_period) {
3141 /* Reset the timer */
3144 /* Sleep to avoid unnecessary checks */
3145 prev_time = cur_time;
3150 printf("Press enter to exit\n");
3151 rc = read(0, &c, 1);