1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc_heap.h>
31 #include <rte_memory.h>
32 #include <rte_memcpy.h>
33 #include <rte_launch.h>
35 #include <rte_alarm.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_atomic.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
50 #ifdef RTE_LIBRTE_IXGBE_PMD
51 #include <rte_pmd_ixgbe.h>
53 #ifdef RTE_LIBRTE_PDUMP
54 #include <rte_pdump.h>
57 #include <rte_metrics.h>
58 #ifdef RTE_LIBRTE_BITRATE
59 #include <rte_bitrate.h>
61 #ifdef RTE_LIBRTE_LATENCY_STATS
62 #include <rte_latencystats.h>
68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69 #define HUGE_FLAG (0x40000)
71 #define HUGE_FLAG MAP_HUGETLB
74 #ifndef MAP_HUGE_SHIFT
75 /* older kernels (or FreeBSD) will not have this define */
76 #define HUGE_SHIFT (26)
78 #define HUGE_SHIFT MAP_HUGE_SHIFT
81 #define EXTMEM_HEAP_NAME "extmem"
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* use master core for command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings and of the DMA memory buffers (mbufs) of the
96 * probed ports among CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
102 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
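/*
 * Illustrative usage (a sketch; the option name and values follow the
 * testpmd user guide and should be verified against your build):
 *
 *   testpmd -l 0-3 -n 4 -- -i --mp-alloc=xmemhuge
 *
 * would select MP_ALLOC_XMEM_HUGE, i.e. mempools backed by externally
 * allocated hugepage memory.
 */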
117 * Store the specified sockets on which the memory pool used by each port is allocated.
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123 * Store the specified sockets on which the RX ring used by each port is allocated.
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129 * Store the specified sockets on which the TX ring used by each port is allocated.
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet addresses of peer target ports to which packets are forwarded.
137 * Must be instantiated with the Ethernet addresses of peer traffic generator ports.
140 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
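/*
 * Worked example: with 2 forwarding ports and nb_rxq = 4, testpmd
 * creates 2 * 4 = 8 forwarding streams, and the configuration must
 * satisfy nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores as stated above.
 */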
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
182 #if defined RTE_LIBRTE_PMD_SOFTNIC
185 #ifdef RTE_LIBRTE_IEEE1588
186 &ieee1588_fwd_engine,
191 struct fwd_config cur_fwd_config;
192 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193 uint32_t retry_enabled;
194 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
197 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
199 * specified on command-line. */
200 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
203 * In a container, the process running with the 'stats-period' option
204 * cannot be terminated. Set a flag to exit the stats period loop after SIGINT/SIGTERM is received.
209 * Configuration of packet segments used by the "txonly" processing engine.
211 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213 TXONLY_DEF_PACKET_LEN,
215 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
217 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
218 /**< Split policy for packets to TX. */
220 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
221 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
223 /* whether the current configuration is in DCB mode; 0 means it is not */
224 uint8_t dcb_config = 0;
226 /* Whether DCB is in testing status */
227 uint8_t dcb_test = 0;
230 * Configurable number of RX/TX queues.
232 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
233 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
236 * Configurable number of RX/TX ring descriptors.
237 * Defaults are supplied by drivers via ethdev.
239 #define RTE_TEST_RX_DESC_DEFAULT 0
240 #define RTE_TEST_TX_DESC_DEFAULT 0
241 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
242 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
244 #define RTE_PMD_PARAM_UNSET -1
246 * Configurable values of RX and TX ring threshold registers.
249 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
250 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
251 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
253 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
254 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
255 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
258 * Configurable value of RX free threshold.
260 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
263 * Configurable value of RX drop enable.
265 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
268 * Configurable value of TX free threshold.
270 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
273 * Configurable value of TX RS bit threshold.
275 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
278 * Configurable value of buffered packets before sending.
280 uint16_t noisy_tx_sw_bufsz;
283 * Configurable value of packet buffer timeout.
285 uint16_t noisy_tx_sw_buf_flush_time;
288 * Configurable value for size of VNF internal memory area
289 * used for simulating noisy neighbour behaviour
291 uint64_t noisy_lkup_mem_sz;
294 * Configurable value of number of random writes done in
295 * VNF simulation memory area.
297 uint64_t noisy_lkup_num_writes;
300 * Configurable value of number of random reads done in
301 * VNF simulation memory area.
303 uint64_t noisy_lkup_num_reads;
306 * Configurable value of number of random reads/writes done in
307 * VNF simulation memory area.
309 uint64_t noisy_lkup_num_reads_writes;
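/*
 * Illustrative command line driving the noisy neighbour knobs above (a
 * sketch; option names taken from the testpmd guide, to be verified for
 * your version):
 *
 *   testpmd ... -- --forward-mode=noisy --noisy-lkup-memory=128 \
 *       --noisy-lkup-num-reads=100 --noisy-lkup-num-writes=100
 */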
312 * Receive Side Scaling (RSS) configuration.
314 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
317 * Port topology configuration
319 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
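/*
 * Example: with 4 ports, "paired" topology forwards 0<->1 and 2<->3,
 * while "chained" forwards 0->1, 1->2, 2->3 and 3->0; "loop" sends
 * packets back out of the port they arrived on.
 */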
322 * Avoid flushing all the RX streams before starting forwarding.
324 uint8_t no_flush_rx = 0; /* flush by default */
327 * Flow API isolated mode.
329 uint8_t flow_isolate_all;
332 * Avoid checking the link status when starting/stopping a port.
334 uint8_t no_link_check = 0; /* check by default */
337 * Enable link status change notification
339 uint8_t lsc_interrupt = 1; /* enabled by default */
342 * Enable device removal notification.
344 uint8_t rmv_interrupt = 1; /* enabled by default */
346 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
349 * Display or mask Ethernet device events
350 * Default to all events except VF_MBOX
352 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
353 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
354 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
355 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
356 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
357 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
358 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
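/*
 * Each event type occupies one bit of the mask; for instance, to also
 * print VF mailbox events one would OR in the corresponding bit:
 *
 *   event_print_mask |= UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX;
 */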
360 * Decide whether all memory is locked for performance.
365 * NIC bypass mode configuration options.
368 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
369 /* The NIC bypass watchdog timeout. */
370 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
374 #ifdef RTE_LIBRTE_LATENCY_STATS
377 * Set when latency stats are enabled on the command line
379 uint8_t latencystats_enabled;
382 * Lcore ID to serve latency statistics.
384 lcoreid_t latencystats_lcore_id = -1;
389 * Ethernet device configuration.
391 struct rte_eth_rxmode rx_mode = {
392 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
395 struct rte_eth_txmode tx_mode = {
396 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
399 struct rte_fdir_conf fdir_conf = {
400 .mode = RTE_FDIR_MODE_NONE,
401 .pballoc = RTE_FDIR_PBALLOC_64K,
402 .status = RTE_FDIR_REPORT_STATUS,
404 .vlan_tci_mask = 0xFFEF,
406 .src_ip = 0xFFFFFFFF,
407 .dst_ip = 0xFFFFFFFF,
410 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
411 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
413 .src_port_mask = 0xFFFF,
414 .dst_port_mask = 0xFFFF,
415 .mac_addr_byte_mask = 0xFF,
416 .tunnel_type_mask = 1,
417 .tunnel_id_mask = 0xFFFFFFFF,
422 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
424 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
425 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
427 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
428 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
430 uint16_t nb_tx_queue_stats_mappings = 0;
431 uint16_t nb_rx_queue_stats_mappings = 0;
434 * Display zero values by default for xstats
436 uint8_t xstats_hide_zero;
438 unsigned int num_sockets = 0;
439 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
441 #ifdef RTE_LIBRTE_BITRATE
442 /* Bitrate statistics */
443 struct rte_stats_bitrates *bitrate_data;
444 lcoreid_t bitrate_lcore_id;
445 uint8_t bitrate_enabled;
448 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
449 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
451 struct vxlan_encap_conf vxlan_encap_conf = {
454 .vni = "\x00\x00\x00",
456 .udp_dst = RTE_BE16(4789),
457 .ipv4_src = IPv4(127, 0, 0, 1),
458 .ipv4_dst = IPv4(255, 255, 255, 255),
459 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
460 "\x00\x00\x00\x00\x00\x00\x00\x01",
461 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
462 "\x00\x00\x00\x00\x00\x00\x11\x11",
464 .eth_src = "\x00\x00\x00\x00\x00\x00",
465 .eth_dst = "\xff\xff\xff\xff\xff\xff",
468 struct nvgre_encap_conf nvgre_encap_conf = {
471 .tni = "\x00\x00\x00",
472 .ipv4_src = IPv4(127, 0, 0, 1),
473 .ipv4_dst = IPv4(255, 255, 255, 255),
474 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
475 "\x00\x00\x00\x00\x00\x00\x00\x01",
476 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
477 "\x00\x00\x00\x00\x00\x00\x11\x11",
479 .eth_src = "\x00\x00\x00\x00\x00\x00",
480 .eth_dst = "\xff\xff\xff\xff\xff\xff",
483 /* Forward function declarations */
484 static void map_port_queue_stats_mapping_registers(portid_t pi,
485 struct rte_port *port);
486 static void check_all_ports_link_status(uint32_t port_mask);
487 static int eth_event_callback(portid_t port_id,
488 enum rte_eth_event_type type,
489 void *param, void *ret_param);
490 static void eth_dev_event_callback(const char *device_name,
491 enum rte_dev_event_type type,
495 * Check if all the ports are started.
496 * If yes, return positive value. If not, return zero.
498 static int all_ports_started(void);
500 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
501 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
504 * Helper function to check if socket is already discovered.
505 * If yes, return positive value. If not, return zero.
508 new_socket_id(unsigned int socket_id)
512 for (i = 0; i < num_sockets; i++) {
513 if (socket_ids[i] == socket_id)
520 * Setup default configuration.
523 set_default_fwd_lcores_config(void)
527 unsigned int sock_num;
530 for (i = 0; i < RTE_MAX_LCORE; i++) {
531 if (!rte_lcore_is_enabled(i))
533 sock_num = rte_lcore_to_socket_id(i);
534 if (new_socket_id(sock_num)) {
535 if (num_sockets >= RTE_MAX_NUMA_NODES) {
536 rte_exit(EXIT_FAILURE,
537 "Total sockets greater than %u\n",
540 socket_ids[num_sockets++] = sock_num;
542 if (i == rte_get_master_lcore())
544 fwd_lcores_cpuids[nb_lc++] = i;
546 nb_lcores = (lcoreid_t) nb_lc;
547 nb_cfg_lcores = nb_lcores;
552 set_def_peer_eth_addrs(void)
556 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
557 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
558 peer_eth_addrs[i].addr_bytes[5] = i;
563 set_default_fwd_ports_config(void)
568 RTE_ETH_FOREACH_DEV(pt_id)
569 fwd_ports_ids[i++] = pt_id;
571 nb_cfg_ports = nb_ports;
572 nb_fwd_ports = nb_ports;
576 set_def_fwd_config(void)
578 set_default_fwd_lcores_config();
579 set_def_peer_eth_addrs();
580 set_default_fwd_ports_config();
583 /* extremely pessimistic estimation of memory required to create a mempool */
585 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
587 unsigned int n_pages, mbuf_per_pg, leftover;
588 uint64_t total_mem, mbuf_mem, obj_sz;
590 /* there is no good way to predict how much space the mempool will
591 * occupy because it will allocate chunks on the fly, and some of those
592 * will come from default DPDK memory while some will come from our
593 * external memory, so just assume 128MB will be enough for everyone.
595 uint64_t hdr_mem = 128 << 20;
597 /* account for possible non-contiguousness */
598 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
600 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
604 mbuf_per_pg = pgsz / obj_sz;
605 leftover = (nb_mbufs % mbuf_per_pg) > 0;
606 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
608 mbuf_mem = n_pages * pgsz;
610 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
612 if (total_mem > SIZE_MAX) {
613 TESTPMD_LOG(ERR, "Memory size too big\n");
616 *out = (size_t)total_mem;
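/*
 * Worked example for the sizing above (illustrative values): with
 * nb_mbufs = 8192, obj_sz = 2560 bytes and pgsz = 2MB (2097152 bytes),
 * mbuf_per_pg = 2097152 / 2560 = 819 and 8192 % 819 != 0, so
 * n_pages = 8192 / 819 + 1 = 11, mbuf_mem = 11 * 2MB = 22MB and
 * total_mem = RTE_ALIGN(128MB + 22MB, 2MB) = 150MB.
 */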
621 static inline uint32_t
624 return (uint32_t)__builtin_ctzll(v);
627 static inline uint32_t
632 v = rte_align64pow2(v);
637 pagesz_flags(uint64_t page_sz)
639 /* as per the mmap() manpage, huge page size flags are the log2 of the
640  * page size shifted by MAP_HUGE_SHIFT
642 int log2 = log2_u64(page_sz);
644 return (log2 << HUGE_SHIFT);
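/*
 * Worked example: for 2MB pages, log2_u64(1ULL << 21) = 21, so this
 * returns 21 << 26, which on Linux equals MAP_HUGE_2MB and is OR'ed
 * with MAP_HUGETLB by the caller (see alloc_mem() below).
 */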
648 alloc_mem(size_t memsz, size_t pgsz, bool huge)
653 /* allocate anonymous hugepages */
654 flags = MAP_ANONYMOUS | MAP_PRIVATE;
656 flags |= HUGE_FLAG | pagesz_flags(pgsz);
658 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
659 if (addr == MAP_FAILED)
665 struct extmem_param {
669 rte_iova_t *iova_table;
670 unsigned int iova_table_len;
674 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
677 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
678 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
679 unsigned int cur_page, n_pages, pgsz_idx;
680 size_t mem_sz, cur_pgsz;
681 rte_iova_t *iovas = NULL;
685 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
686 /* skip anything that is too big */
687 if (pgsizes[pgsz_idx] > SIZE_MAX)
690 cur_pgsz = pgsizes[pgsz_idx];
692 /* if we were told not to allocate hugepages, override */
694 cur_pgsz = sysconf(_SC_PAGESIZE);
696 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
698 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
702 /* allocate our memory */
703 addr = alloc_mem(mem_sz, cur_pgsz, huge);
705 /* if we couldn't allocate memory with a specified page size,
706 * that doesn't mean we can't do it with other page sizes, so
712 /* store IOVA addresses for every page in this memory area */
713 n_pages = mem_sz / cur_pgsz;
715 iovas = malloc(sizeof(*iovas) * n_pages);
718 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
721 /* lock memory if it's not huge pages */
725 /* populate IOVA addresses */
726 for (cur_page = 0; cur_page < n_pages; cur_page++) {
731 offset = cur_pgsz * cur_page;
732 cur = RTE_PTR_ADD(addr, offset);
734 /* touch the page before getting its IOVA */
735 *(volatile char *)cur = 0;
737 iova = rte_mem_virt2iova(cur);
739 iovas[cur_page] = iova;
744 /* if we couldn't allocate anything */
750 param->pgsz = cur_pgsz;
751 param->iova_table = iovas;
752 param->iova_table_len = n_pages;
759 munmap(addr, mem_sz);
765 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
767 struct extmem_param param;
770 memset(&param, 0, sizeof(param));
772 /* check if our heap exists */
773 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
775 /* create our heap */
776 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
778 TESTPMD_LOG(ERR, "Cannot create heap\n");
783 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
785 TESTPMD_LOG(ERR, "Cannot create memory area\n");
789 /* we now have a valid memory area, so add it to heap */
790 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
791 param.addr, param.len, param.iova_table,
792 param.iova_table_len, param.pgsz);
794 /* when using VFIO, memory is automatically mapped for DMA by EAL */
796 /* not needed any more */
797 free(param.iova_table);
800 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
801 munmap(param.addr, param.len);
807 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
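/*
 * A minimal sketch of consuming the heap set up above (hypothetical
 * allocation; it only succeeds once memory has been added to the heap
 * with rte_malloc_heap_memory_add() as done in setup_extmem()):
 *
 *   int sock = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
 *   void *obj = rte_malloc_socket(NULL, 4096, 0, sock);
 *   ...
 *   rte_free(obj);
 */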
814 * Configuration initialisation done once at init time.
817 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
818 unsigned int socket_id)
820 char pool_name[RTE_MEMPOOL_NAMESIZE];
821 struct rte_mempool *rte_mp = NULL;
824 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
825 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
828 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
829 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
831 switch (mp_alloc_type) {
832 case MP_ALLOC_NATIVE:
834 /* wrapper to rte_mempool_create() */
835 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
836 rte_mbuf_best_mempool_ops());
837 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
838 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
843 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
844 mb_size, (unsigned int) mb_mempool_cache,
845 sizeof(struct rte_pktmbuf_pool_private),
850 if (rte_mempool_populate_anon(rte_mp) == 0) {
851 rte_mempool_free(rte_mp);
855 rte_pktmbuf_pool_init(rte_mp, NULL);
856 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
860 case MP_ALLOC_XMEM_HUGE:
863 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
865 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
866 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
869 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
871 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
873 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
874 rte_mbuf_best_mempool_ops());
875 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
876 mb_mempool_cache, 0, mbuf_seg_size,
882 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
887 if (rte_mp == NULL) {
888 rte_exit(EXIT_FAILURE,
889 "Creation of mbuf pool for socket %u failed: %s\n",
890 socket_id, rte_strerror(rte_errno));
891 } else if (verbose_level > 0) {
892 rte_mempool_dump(stdout, rte_mp);
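/*
 * Illustrative call: creating the default pool of nb_mbuf_per_pool
 * mbufs with DEFAULT_MBUF_DATA_SIZE segments on socket 0 would be
 *
 *   mbuf_pool_create(DEFAULT_MBUF_DATA_SIZE, nb_mbuf_per_pool, 0);
 *
 * which mirrors what init_config() below does in the UMA case.
 */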
897 * Check whether the given socket id is valid in NUMA mode;
898 * if valid, return 0, else return -1
901 check_socket_id(const unsigned int socket_id)
903 static int warning_once = 0;
905 if (new_socket_id(socket_id)) {
906 if (!warning_once && numa_support)
907 printf("Warning: NUMA should be configured manually by"
908 " using --port-numa-config and"
909 " --ring-numa-config parameters along with"
918 * Get the allowed maximum number of RX queues.
919 * *pid returns the port id which has the minimal value of
920 * max_rx_queues among all ports.
923 get_allowed_max_nb_rxq(portid_t *pid)
925 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
927 struct rte_eth_dev_info dev_info;
929 RTE_ETH_FOREACH_DEV(pi) {
930 rte_eth_dev_info_get(pi, &dev_info);
931 if (dev_info.max_rx_queues < allowed_max_rxq) {
932 allowed_max_rxq = dev_info.max_rx_queues;
936 return allowed_max_rxq;
940 * Check whether the input rxq is valid.
941 * The input rxq is valid if it is not greater than the maximum
942 * number of RX queues of any port.
943 * If valid, return 0, else return -1
946 check_nb_rxq(queueid_t rxq)
948 queueid_t allowed_max_rxq;
951 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
952 if (rxq > allowed_max_rxq) {
953 printf("Fail: input rxq (%u) can't be greater "
954 "than max_rx_queues (%u) of port %u\n",
964 * Get the allowed maximum number of TX queues.
965 * *pid returns the port id which has the minimal value of
966 * max_tx_queues among all ports.
969 get_allowed_max_nb_txq(portid_t *pid)
971 queueid_t allowed_max_txq = MAX_QUEUE_ID;
973 struct rte_eth_dev_info dev_info;
975 RTE_ETH_FOREACH_DEV(pi) {
976 rte_eth_dev_info_get(pi, &dev_info);
977 if (dev_info.max_tx_queues < allowed_max_txq) {
978 allowed_max_txq = dev_info.max_tx_queues;
982 return allowed_max_txq;
986 * Check input txq is valid or not.
987 * The input txq is valid if it is not greater than the maximum
988 * number of TX queues of any port.
989 * If valid, return 0, else return -1
992 check_nb_txq(queueid_t txq)
994 queueid_t allowed_max_txq;
997 allowed_max_txq = get_allowed_max_nb_txq(&pid);
998 if (txq > allowed_max_txq) {
999 printf("Fail: input txq (%u) can't be greater "
1000 "than max_tx_queues (%u) of port %u\n",
1013 struct rte_port *port;
1014 struct rte_mempool *mbp;
1015 unsigned int nb_mbuf_per_pool;
1017 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1018 struct rte_gro_param gro_param;
1022 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1024 /* Configuration of logical cores. */
1025 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1026 sizeof(struct fwd_lcore *) * nb_lcores,
1027 RTE_CACHE_LINE_SIZE);
1028 if (fwd_lcores == NULL) {
1029 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1030 "failed\n", nb_lcores);
1032 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1033 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1034 sizeof(struct fwd_lcore),
1035 RTE_CACHE_LINE_SIZE);
1036 if (fwd_lcores[lc_id] == NULL) {
1037 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1040 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1043 RTE_ETH_FOREACH_DEV(pid) {
1045 /* Apply default TxRx configuration for all ports */
1046 port->dev_conf.txmode = tx_mode;
1047 port->dev_conf.rxmode = rx_mode;
1048 rte_eth_dev_info_get(pid, &port->dev_info);
1050 if (!(port->dev_info.tx_offload_capa &
1051 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1052 port->dev_conf.txmode.offloads &=
1053 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1055 if (port_numa[pid] != NUMA_NO_CONFIG)
1056 port_per_socket[port_numa[pid]]++;
1058 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1061 * if socket_id is invalid,
1062 * set to the first available socket.
1064 if (check_socket_id(socket_id) < 0)
1065 socket_id = socket_ids[0];
1066 port_per_socket[socket_id]++;
1070 /* Apply Rx offloads configuration */
1071 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1072 port->rx_conf[k].offloads =
1073 port->dev_conf.rxmode.offloads;
1074 /* Apply Tx offloads configuration */
1075 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1076 port->tx_conf[k].offloads =
1077 port->dev_conf.txmode.offloads;
1079 /* set flag to initialize port/queue */
1080 port->need_reconfig = 1;
1081 port->need_reconfig_queues = 1;
1085 * Create mbuf pools.
1086 * If NUMA support is disabled, create a single mbuf pool in
1087 * socket 0 memory by default.
1088 * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
1090 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
1091 * nb_txd can be configured at run time.
1093 if (param_total_num_mbufs)
1094 nb_mbuf_per_pool = param_total_num_mbufs;
1096 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1097 (nb_lcores * mb_mempool_cache) +
1098 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1099 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
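/*
 * Worked example, assuming RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX
 * = 2048, MAX_PKT_BURST = 512 and the default mb_mempool_cache = 250
 * (values to be checked against testpmd.h): with 4 lcores,
 * nb_mbuf_per_pool = 2048 + 4 * 250 + 2048 + 512 = 5608, scaled by
 * RTE_MAX_ETHPORTS (32 by default) to 179456 mbufs.
 */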
1105 for (i = 0; i < num_sockets; i++)
1106 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1109 if (socket_num == UMA_NO_CONFIG)
1110 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
1112 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1118 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1119 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1121 * Record which mbuf pool each logical core uses, if needed.
1123 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1124 mbp = mbuf_pool_find(
1125 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1128 mbp = mbuf_pool_find(0);
1129 fwd_lcores[lc_id]->mbp = mbp;
1130 /* initialize GSO context */
1131 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1132 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1133 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1134 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
1136 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1139 /* Configuration of packet forwarding streams. */
1140 if (init_fwd_streams() < 0)
1141 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1145 /* create a gro context for each lcore */
1146 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1147 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1148 gro_param.max_item_per_flow = MAX_PKT_BURST;
1149 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1150 gro_param.socket_id = rte_lcore_to_socket_id(
1151 fwd_lcores_cpuids[lc_id]);
1152 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1153 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1154 rte_exit(EXIT_FAILURE,
1155 "rte_gro_ctx_create() failed\n");
1159 #if defined RTE_LIBRTE_PMD_SOFTNIC
1160 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1161 RTE_ETH_FOREACH_DEV(pid) {
1163 const char *driver = port->dev_info.driver_name;
1165 if (strcmp(driver, "net_softnic") == 0)
1166 port->softport.fwd_lcore_arg = fwd_lcores;
1175 reconfig(portid_t new_port_id, unsigned socket_id)
1177 struct rte_port *port;
1179 /* Reconfiguration of Ethernet ports. */
1180 port = &ports[new_port_id];
1181 rte_eth_dev_info_get(new_port_id, &port->dev_info);
1183 /* set flag to initialize port/queue */
1184 port->need_reconfig = 1;
1185 port->need_reconfig_queues = 1;
1186 port->socket_id = socket_id;
1193 init_fwd_streams(void)
1196 struct rte_port *port;
1197 streamid_t sm_id, nb_fwd_streams_new;
1200 /* set socket id according to NUMA config or not */
1201 RTE_ETH_FOREACH_DEV(pid) {
1203 if (nb_rxq > port->dev_info.max_rx_queues) {
1204 printf("Fail: nb_rxq(%d) is greater than "
1205 "max_rx_queues(%d)\n", nb_rxq,
1206 port->dev_info.max_rx_queues);
1209 if (nb_txq > port->dev_info.max_tx_queues) {
1210 printf("Fail: nb_txq(%d) is greater than "
1211 "max_tx_queues(%d)\n", nb_txq,
1212 port->dev_info.max_tx_queues);
1216 if (port_numa[pid] != NUMA_NO_CONFIG)
1217 port->socket_id = port_numa[pid];
1219 port->socket_id = rte_eth_dev_socket_id(pid);
1222 * if socket_id is invalid,
1223 * set to the first available socket.
1225 if (check_socket_id(port->socket_id) < 0)
1226 port->socket_id = socket_ids[0];
1230 if (socket_num == UMA_NO_CONFIG)
1231 port->socket_id = 0;
1233 port->socket_id = socket_num;
1237 q = RTE_MAX(nb_rxq, nb_txq);
1239 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1242 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1243 if (nb_fwd_streams_new == nb_fwd_streams)
1246 if (fwd_streams != NULL) {
1247 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1248 if (fwd_streams[sm_id] == NULL)
1250 rte_free(fwd_streams[sm_id]);
1251 fwd_streams[sm_id] = NULL;
1253 rte_free(fwd_streams);
1258 nb_fwd_streams = nb_fwd_streams_new;
1259 if (nb_fwd_streams) {
1260 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1261 sizeof(struct fwd_stream *) * nb_fwd_streams,
1262 RTE_CACHE_LINE_SIZE);
1263 if (fwd_streams == NULL)
1264 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1265 " (struct fwd_stream *)) failed\n",
1268 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1269 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1270 " struct fwd_stream", sizeof(struct fwd_stream),
1271 RTE_CACHE_LINE_SIZE);
1272 if (fwd_streams[sm_id] == NULL)
1273 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1274 "(struct fwd_stream) failed\n");
1281 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1283 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1285 unsigned int total_burst;
1286 unsigned int nb_burst;
1287 unsigned int burst_stats[3];
1288 uint16_t pktnb_stats[3];
1290 int burst_percent[3];
1293 * First compute the total number of packet bursts and the
1294 * two highest numbers of bursts of the same number of packets.
1297 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1298 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1299 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1300 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1303 total_burst += nb_burst;
1304 if (nb_burst > burst_stats[0]) {
1305 burst_stats[1] = burst_stats[0];
1306 pktnb_stats[1] = pktnb_stats[0];
1307 burst_stats[0] = nb_burst;
1308 pktnb_stats[0] = nb_pkt;
1309 } else if (nb_burst > burst_stats[1]) {
1310 burst_stats[1] = nb_burst;
1311 pktnb_stats[1] = nb_pkt;
1314 if (total_burst == 0)
1316 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1317 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1318 burst_percent[0], (int) pktnb_stats[0]);
1319 if (burst_stats[0] == total_burst) {
1323 if (burst_stats[0] + burst_stats[1] == total_burst) {
1324 printf(" + %d%% of %d pkts]\n",
1325 100 - burst_percent[0], pktnb_stats[1]);
1328 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1329 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1330 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1331 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1334 printf(" + %d%% of %d pkts + %d%% of others]\n",
1335 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
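/*
 * Example output, assuming 900 recorded bursts of 32 packets and 100
 * bursts of 16 packets:
 *
 *   RX-bursts : 1000 [90% of 32 pkts + 10% of 16 pkts]
 */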
1337 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1340 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
1342 struct rte_port *port;
1345 static const char *fwd_stats_border = "----------------------";
1347 port = &ports[port_id];
1348 printf("\n %s Forward statistics for port %-2d %s\n",
1349 fwd_stats_border, port_id, fwd_stats_border);
1351 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
1352 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1354 stats->ipackets, stats->imissed,
1355 (uint64_t) (stats->ipackets + stats->imissed));
1357 if (cur_fwd_eng == &csum_fwd_engine)
1358 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
1359 port->rx_bad_ip_csum, port->rx_bad_l4_csum,
1360 port->rx_bad_outer_l4_csum);
1361 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1362 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
1363 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
1366 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1368 stats->opackets, port->tx_dropped,
1369 (uint64_t) (stats->opackets + port->tx_dropped));
1372 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
1374 stats->ipackets, stats->imissed,
1375 (uint64_t) (stats->ipackets + stats->imissed));
1377 if (cur_fwd_eng == &csum_fwd_engine)
1378 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64" Bad-outer-l4csum: %-14"PRIu64"\n",
1379 port->rx_bad_ip_csum, port->rx_bad_l4_csum,
1380 port->rx_bad_outer_l4_csum);
1381 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1382 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
1383 printf(" RX-nombufs: %14"PRIu64"\n",
1387 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
1389 stats->opackets, port->tx_dropped,
1390 (uint64_t) (stats->opackets + port->tx_dropped));
1393 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1394 if (port->rx_stream)
1395 pkt_burst_stats_display("RX",
1396 &port->rx_stream->rx_burst_stats);
1397 if (port->tx_stream)
1398 pkt_burst_stats_display("TX",
1399 &port->tx_stream->tx_burst_stats);
1402 if (port->rx_queue_stats_mapping_enabled) {
1404 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1405 printf(" Stats reg %2d RX-packets:%14"PRIu64
1406 " RX-errors:%14"PRIu64
1407 " RX-bytes:%14"PRIu64"\n",
1408 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1412 if (port->tx_queue_stats_mapping_enabled) {
1413 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1414 printf(" Stats reg %2d TX-packets:%14"PRIu64
1415 " TX-bytes:%14"PRIu64"\n",
1416 i, stats->q_opackets[i], stats->q_obytes[i]);
1420 printf(" %s--------------------------------%s\n",
1421 fwd_stats_border, fwd_stats_border);
1425 fwd_stream_stats_display(streamid_t stream_id)
1427 struct fwd_stream *fs;
1428 static const char *fwd_top_stats_border = "-------";
1430 fs = fwd_streams[stream_id];
1431 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1432 (fs->fwd_dropped == 0))
1434 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1435 "TX Port=%2d/Queue=%2d %s\n",
1436 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1437 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1438 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1439 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1441 /* if checksum mode */
1442 if (cur_fwd_eng == &csum_fwd_engine) {
1443 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
1444 "%-14u Rx- bad outer L4 checksum: %-14u\n",
1445 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1446 fs->rx_bad_outer_l4_csum);
1449 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1450 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1451 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1456 flush_fwd_rx_queues(void)
1458 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1465 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1466 uint64_t timer_period;
1468 /* convert to number of cycles */
1469 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1471 for (j = 0; j < 2; j++) {
1472 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1473 for (rxq = 0; rxq < nb_rxq; rxq++) {
1474 port_id = fwd_ports_ids[rxp];
1476 * testpmd can get stuck in the do-while loop below
1477 * if rte_eth_rx_burst() always returns nonzero
1478 * packets, so a timer is added to exit this loop
1479 * after the 1-second timer expires.
1481 prev_tsc = rte_rdtsc();
1483 nb_rx = rte_eth_rx_burst(port_id, rxq,
1484 pkts_burst, MAX_PKT_BURST);
1485 for (i = 0; i < nb_rx; i++)
1486 rte_pktmbuf_free(pkts_burst[i]);
1488 cur_tsc = rte_rdtsc();
1489 diff_tsc = cur_tsc - prev_tsc;
1490 timer_tsc += diff_tsc;
1491 } while ((nb_rx > 0) &&
1492 (timer_tsc < timer_period));
1496 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1501 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1503 struct fwd_stream **fsm;
1506 #ifdef RTE_LIBRTE_BITRATE
1507 uint64_t tics_per_1sec;
1508 uint64_t tics_datum;
1509 uint64_t tics_current;
1510 uint16_t i, cnt_ports;
1512 cnt_ports = nb_ports;
1513 tics_datum = rte_rdtsc();
1514 tics_per_1sec = rte_get_timer_hz();
1516 fsm = &fwd_streams[fc->stream_idx];
1517 nb_fs = fc->stream_nb;
1519 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1520 (*pkt_fwd)(fsm[sm_id]);
1521 #ifdef RTE_LIBRTE_BITRATE
1522 if (bitrate_enabled != 0 &&
1523 bitrate_lcore_id == rte_lcore_id()) {
1524 tics_current = rte_rdtsc();
1525 if (tics_current - tics_datum >= tics_per_1sec) {
1526 /* Periodic bitrate calculation */
1527 for (i = 0; i < cnt_ports; i++)
1528 rte_stats_bitrate_calc(bitrate_data,
1530 tics_datum = tics_current;
1534 #ifdef RTE_LIBRTE_LATENCY_STATS
1535 if (latencystats_enabled != 0 &&
1536 latencystats_lcore_id == rte_lcore_id())
1537 rte_latencystats_update();
1540 } while (! fc->stopped);
1544 start_pkt_forward_on_core(void *fwd_arg)
1546 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1547 cur_fwd_config.fwd_eng->packet_fwd);
1552 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1553 * Used to start communication flows in network loopback test configurations.
1556 run_one_txonly_burst_on_core(void *fwd_arg)
1558 struct fwd_lcore *fwd_lc;
1559 struct fwd_lcore tmp_lcore;
1561 fwd_lc = (struct fwd_lcore *) fwd_arg;
1562 tmp_lcore = *fwd_lc;
1563 tmp_lcore.stopped = 1;
1564 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1569 * Launch packet forwarding:
1570 * - Setup per-port forwarding context.
1571 * - Launch logical cores with their forwarding configuration.
1574 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1576 port_fwd_begin_t port_fwd_begin;
1581 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1582 if (port_fwd_begin != NULL) {
1583 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1584 (*port_fwd_begin)(fwd_ports_ids[i]);
1586 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1587 lc_id = fwd_lcores_cpuids[i];
1588 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1589 fwd_lcores[i]->stopped = 0;
1590 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1591 fwd_lcores[i], lc_id);
1593 printf("launch lcore %u failed - diag=%d\n",
1600 * Update the forward ports list.
1603 update_fwd_ports(portid_t new_pid)
1606 unsigned int new_nb_fwd_ports = 0;
1609 for (i = 0; i < nb_fwd_ports; ++i) {
1610 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1613 fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1617 if (new_pid < RTE_MAX_ETHPORTS)
1618 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1620 nb_fwd_ports = new_nb_fwd_ports;
1621 nb_cfg_ports = new_nb_fwd_ports;
1625 * Launch packet forwarding configuration.
1628 start_packet_forwarding(int with_tx_first)
1630 port_fwd_begin_t port_fwd_begin;
1631 port_fwd_end_t port_fwd_end;
1632 struct rte_port *port;
1637 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1638 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1640 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1641 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1643 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1644 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1645 (!nb_rxq || !nb_txq))
1646 rte_exit(EXIT_FAILURE,
1647 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1648 cur_fwd_eng->fwd_mode_name);
1650 if (all_ports_started() == 0) {
1651 printf("Not all ports were started\n");
1654 if (test_done == 0) {
1655 printf("Packet forwarding already started\n");
1661 for (i = 0; i < nb_fwd_ports; i++) {
1662 pt_id = fwd_ports_ids[i];
1663 port = &ports[pt_id];
1664 if (!port->dcb_flag) {
1665 printf("In DCB mode, all forwarding ports must "
1666 "be configured in this mode.\n");
1670 if (nb_fwd_lcores == 1) {
1671 printf("In DCB mode,the nb forwarding cores "
1672 "should be larger than 1.\n");
1681 flush_fwd_rx_queues();
1683 pkt_fwd_config_display(&cur_fwd_config);
1684 rxtx_config_display();
1686 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1687 pt_id = fwd_ports_ids[i];
1688 port = &ports[pt_id];
1689 rte_eth_stats_get(pt_id, &port->stats);
1690 port->tx_dropped = 0;
1692 map_port_queue_stats_mapping_registers(pt_id, port);
1694 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1695 fwd_streams[sm_id]->rx_packets = 0;
1696 fwd_streams[sm_id]->tx_packets = 0;
1697 fwd_streams[sm_id]->fwd_dropped = 0;
1698 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1699 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1700 fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;
1702 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1703 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1704 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1705 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1706 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1708 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1709 fwd_streams[sm_id]->core_cycles = 0;
1712 if (with_tx_first) {
1713 port_fwd_begin = tx_only_engine.port_fwd_begin;
1714 if (port_fwd_begin != NULL) {
1715 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1716 (*port_fwd_begin)(fwd_ports_ids[i]);
1718 while (with_tx_first--) {
1719 launch_packet_forwarding(
1720 run_one_txonly_burst_on_core);
1721 rte_eal_mp_wait_lcore();
1723 port_fwd_end = tx_only_engine.port_fwd_end;
1724 if (port_fwd_end != NULL) {
1725 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1726 (*port_fwd_end)(fwd_ports_ids[i]);
1729 launch_packet_forwarding(start_pkt_forward_on_core);
1733 stop_packet_forwarding(void)
1735 struct rte_eth_stats stats;
1736 struct rte_port *port;
1737 port_fwd_end_t port_fwd_end;
1742 uint64_t total_recv;
1743 uint64_t total_xmit;
1744 uint64_t total_rx_dropped;
1745 uint64_t total_tx_dropped;
1746 uint64_t total_rx_nombuf;
1747 uint64_t tx_dropped;
1748 uint64_t rx_bad_ip_csum;
1749 uint64_t rx_bad_l4_csum;
1750 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1751 uint64_t fwd_cycles;
1754 static const char *acc_stats_border = "+++++++++++++++";
1757 printf("Packet forwarding not started\n");
1760 printf("Telling cores to stop...");
1761 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1762 fwd_lcores[lc_id]->stopped = 1;
1763 printf("\nWaiting for lcores to finish...\n");
1764 rte_eal_mp_wait_lcore();
1765 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1766 if (port_fwd_end != NULL) {
1767 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1768 pt_id = fwd_ports_ids[i];
1769 (*port_fwd_end)(pt_id);
1772 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1775 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1776 if (cur_fwd_config.nb_fwd_streams >
1777 cur_fwd_config.nb_fwd_ports) {
1778 fwd_stream_stats_display(sm_id);
1779 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1780 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1782 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1784 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1787 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1788 tx_dropped = (uint64_t) (tx_dropped +
1789 fwd_streams[sm_id]->fwd_dropped);
1790 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1793 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1794 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1795 fwd_streams[sm_id]->rx_bad_ip_csum);
1796 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1800 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1801 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1802 fwd_streams[sm_id]->rx_bad_l4_csum);
1803 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1806 ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
1807 fwd_streams[sm_id]->rx_bad_outer_l4_csum;
1809 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1810 fwd_cycles = (uint64_t) (fwd_cycles +
1811 fwd_streams[sm_id]->core_cycles);
1816 total_rx_dropped = 0;
1817 total_tx_dropped = 0;
1818 total_rx_nombuf = 0;
1819 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1820 pt_id = fwd_ports_ids[i];
1822 port = &ports[pt_id];
1823 rte_eth_stats_get(pt_id, &stats);
1824 stats.ipackets -= port->stats.ipackets;
1825 port->stats.ipackets = 0;
1826 stats.opackets -= port->stats.opackets;
1827 port->stats.opackets = 0;
1828 stats.ibytes -= port->stats.ibytes;
1829 port->stats.ibytes = 0;
1830 stats.obytes -= port->stats.obytes;
1831 port->stats.obytes = 0;
1832 stats.imissed -= port->stats.imissed;
1833 port->stats.imissed = 0;
1834 stats.oerrors -= port->stats.oerrors;
1835 port->stats.oerrors = 0;
1836 stats.rx_nombuf -= port->stats.rx_nombuf;
1837 port->stats.rx_nombuf = 0;
1839 total_recv += stats.ipackets;
1840 total_xmit += stats.opackets;
1841 total_rx_dropped += stats.imissed;
1842 total_tx_dropped += port->tx_dropped;
1843 total_rx_nombuf += stats.rx_nombuf;
1845 fwd_port_stats_display(pt_id, &stats);
1848 printf("\n %s Accumulated forward statistics for all ports"
1850 acc_stats_border, acc_stats_border);
1851 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1853 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1855 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1856 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1857 if (total_rx_nombuf > 0)
1858 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1859 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1861 acc_stats_border, acc_stats_border);
1862 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1864 printf("\n CPU cycles/packet=%u (total cycles="
1865 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1866 (unsigned int)(fwd_cycles / total_recv),
1867 fwd_cycles, total_recv);
1869 printf("\nDone.\n");
1874 dev_set_link_up(portid_t pid)
1876 if (rte_eth_dev_set_link_up(pid) < 0)
1877 printf("\nSet link up fail.\n");
1881 dev_set_link_down(portid_t pid)
1883 if (rte_eth_dev_set_link_down(pid) < 0)
1884 printf("\nSet link down fail.\n");
1888 all_ports_started(void)
1891 struct rte_port *port;
1893 RTE_ETH_FOREACH_DEV(pi) {
1895 /* Check if there is a port which is not started */
1896 if ((port->port_status != RTE_PORT_STARTED) &&
1897 (port->slave_flag == 0))
1901 /* All ports are started */
1906 port_is_stopped(portid_t port_id)
1908 struct rte_port *port = &ports[port_id];
1910 if ((port->port_status != RTE_PORT_STOPPED) &&
1911 (port->slave_flag == 0))
1917 all_ports_stopped(void)
1921 RTE_ETH_FOREACH_DEV(pi) {
1922 if (!port_is_stopped(pi))
1930 port_is_started(portid_t port_id)
1932 if (port_id_is_invalid(port_id, ENABLED_WARN))
1935 if (ports[port_id].port_status != RTE_PORT_STARTED)
1942 port_is_closed(portid_t port_id)
1944 if (port_id_is_invalid(port_id, ENABLED_WARN))
1947 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1954 start_port(portid_t pid)
1956 int diag, need_check_link_status = -1;
1959 struct rte_port *port;
1960 struct ether_addr mac_addr;
1961 enum rte_eth_event_type event_type;
1963 if (port_id_is_invalid(pid, ENABLED_WARN))
1968 RTE_ETH_FOREACH_DEV(pi) {
1969 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1972 need_check_link_status = 0;
1974 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1975 RTE_PORT_HANDLING) == 0) {
1976 printf("Port %d is now not stopped\n", pi);
1980 if (port->need_reconfig > 0) {
1981 port->need_reconfig = 0;
1983 if (flow_isolate_all) {
1984 int ret = port_flow_isolate(pi, 1);
1986 printf("Failed to apply isolated"
1987 " mode on port %d\n", pi);
1992 printf("Configuring Port %d (socket %u)\n", pi,
1994 /* configure port */
1995 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1998 if (rte_atomic16_cmpset(&(port->port_status),
1999 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2000 printf("Port %d can not be set back "
2001 "to stopped\n", pi);
2002 printf("Fail to configure port %d\n", pi);
2003 /* try to reconfigure port next time */
2004 port->need_reconfig = 1;
2008 if (port->need_reconfig_queues > 0) {
2009 port->need_reconfig_queues = 0;
2010 /* setup tx queues */
2011 for (qi = 0; qi < nb_txq; qi++) {
2012 if ((numa_support) &&
2013 (txring_numa[pi] != NUMA_NO_CONFIG))
2014 diag = rte_eth_tx_queue_setup(pi, qi,
2015 port->nb_tx_desc[qi],
2017 &(port->tx_conf[qi]));
2019 diag = rte_eth_tx_queue_setup(pi, qi,
2020 port->nb_tx_desc[qi],
2022 &(port->tx_conf[qi]));
2027 /* Fail to setup tx queue, return */
2028 if (rte_atomic16_cmpset(&(port->port_status),
2030 RTE_PORT_STOPPED) == 0)
2031 printf("Port %d can not be set back "
2032 "to stopped\n", pi);
2033 printf("Fail to configure port %d tx queues\n",
2035 /* try to reconfigure queues next time */
2036 port->need_reconfig_queues = 1;
2039 for (qi = 0; qi < nb_rxq; qi++) {
2040 /* setup rx queues */
2041 if ((numa_support) &&
2042 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2043 struct rte_mempool *mp =
2044 mbuf_pool_find(rxring_numa[pi]);
2046 printf("Failed to setup RX queue:"
2047 "No mempool allocation"
2048 " on the socket %d\n",
2053 diag = rte_eth_rx_queue_setup(pi, qi,
2054 port->nb_rx_desc[qi],
2056 &(port->rx_conf[qi]),
2059 struct rte_mempool *mp =
2060 mbuf_pool_find(port->socket_id);
2062 printf("Failed to setup RX queue:"
2063 "No mempool allocation"
2064 " on the socket %d\n",
2068 diag = rte_eth_rx_queue_setup(pi, qi,
2069 port->nb_rx_desc[qi],
2071 &(port->rx_conf[qi]),
2077 /* Fail to setup rx queue, return */
2078 if (rte_atomic16_cmpset(&(port->port_status),
2080 RTE_PORT_STOPPED) == 0)
2081 printf("Port %d can not be set back "
2082 "to stopped\n", pi);
2083 printf("Fail to configure port %d rx queues\n",
2085 /* try to reconfigure queues next time */
2086 port->need_reconfig_queues = 1;
2092 if (rte_eth_dev_start(pi) < 0) {
2093 printf("Fail to start port %d\n", pi);
2095 /* Failed to start the port, revert its status */
2096 if (rte_atomic16_cmpset(&(port->port_status),
2097 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2098 printf("Port %d can not be set back to "
2103 if (rte_atomic16_cmpset(&(port->port_status),
2104 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2105 printf("Port %d can not be set into started\n", pi);
2107 rte_eth_macaddr_get(pi, &mac_addr);
2108 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2109 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2110 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2111 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2113 /* at least one port started, need to check link status */
2114 need_check_link_status = 1;
2117 for (event_type = RTE_ETH_EVENT_UNKNOWN;
2118 event_type < RTE_ETH_EVENT_MAX;
2120 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
2125 printf("Failed to setup even callback for event %d\n",
2131 if (need_check_link_status == 1 && !no_link_check)
2132 check_all_ports_link_status(RTE_PORT_ALL);
2133 else if (need_check_link_status == 0)
2134 printf("Please stop the ports first\n");
2141 stop_port(portid_t pid)
2144 struct rte_port *port;
2145 int need_check_link_status = 0;
2152 if (port_id_is_invalid(pid, ENABLED_WARN))
2155 printf("Stopping ports...\n");
2157 RTE_ETH_FOREACH_DEV(pi) {
2158 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2161 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2162 printf("Please remove port %d from forwarding configuration.\n", pi);
2166 if (port_is_bonding_slave(pi)) {
2167 printf("Please remove port %d from bonded device.\n", pi);
2172 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2173 RTE_PORT_HANDLING) == 0)
2176 rte_eth_dev_stop(pi);
2178 if (rte_atomic16_cmpset(&(port->port_status),
2179 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2180 printf("Port %d can not be set into stopped\n", pi);
2181 need_check_link_status = 1;
2183 if (need_check_link_status && !no_link_check)
2184 check_all_ports_link_status(RTE_PORT_ALL);
2190 close_port(portid_t pid)
2193 struct rte_port *port;
2195 if (port_id_is_invalid(pid, ENABLED_WARN))
2198 printf("Closing ports...\n");
2200 RTE_ETH_FOREACH_DEV(pi) {
2201 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2204 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2205 printf("Please remove port %d from forwarding configuration.\n", pi);
2209 if (port_is_bonding_slave(pi)) {
2210 printf("Please remove port %d from bonded device.\n", pi);
2215 if (rte_atomic16_cmpset(&(port->port_status),
2216 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2217 printf("Port %d is already closed\n", pi);
2221 if (rte_atomic16_cmpset(&(port->port_status),
2222 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2223 printf("Port %d is now not stopped\n", pi);
2227 if (port->flow_list)
2228 port_flow_flush(pi);
2229 rte_eth_dev_close(pi);
2231 if (rte_atomic16_cmpset(&(port->port_status),
2232 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2233 printf("Port %d cannot be set to closed\n", pi);
2240 reset_port(portid_t pid)
2244 struct rte_port *port;
2246 if (port_id_is_invalid(pid, ENABLED_WARN))
2249 printf("Resetting ports...\n");
2251 RTE_ETH_FOREACH_DEV(pi) {
2252 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2255 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2256 printf("Please remove port %d from forwarding "
2257 "configuration.\n", pi);
2261 if (port_is_bonding_slave(pi)) {
2262 printf("Please remove port %d from bonded device.\n",
2267 diag = rte_eth_dev_reset(pi);
2270 port->need_reconfig = 1;
2271 port->need_reconfig_queues = 1;
2273 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2281 attach_port(char *identifier)
2284 unsigned int socket_id;
2286 printf("Attaching a new port...\n");
2288 if (identifier == NULL) {
2289 printf("Invalid parameters are specified\n");
2293 if (rte_eth_dev_attach(identifier, &pi))
2296 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2297 /* if socket_id is invalid, set to the first available socket. */
2298 if (check_socket_id(socket_id) < 0)
2299 socket_id = socket_ids[0];
2300 reconfig(pi, socket_id);
2301 rte_eth_promiscuous_enable(pi);
2303 ports_ids[nb_ports] = pi;
2304 nb_ports = rte_eth_dev_count_avail();
2306 ports[pi].port_status = RTE_PORT_STOPPED;
2308 update_fwd_ports(pi);
2310 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2315 detach_port(portid_t port_id)
2317 char name[RTE_ETH_NAME_MAX_LEN];
2320 printf("Detaching a port...\n");
2322 if (!port_is_closed(port_id)) {
2323 printf("Please close port first\n");
2327 if (ports[port_id].flow_list)
2328 port_flow_flush(port_id);
2330 if (rte_eth_dev_detach(port_id, name)) {
2331 TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2335 for (i = 0; i < nb_ports; i++) {
2336 if (ports_ids[i] == port_id) {
2337 ports_ids[i] = ports_ids[nb_ports-1];
2338 ports_ids[nb_ports-1] = 0;
2342 nb_ports = rte_eth_dev_count_avail();
2344 update_fwd_ports(RTE_MAX_ETHPORTS);
2346 printf("Port %u is detached. Now total ports is %d\n",
2355 struct rte_device *device;
2360 stop_packet_forwarding();
2362 if (ports != NULL) {
2364 RTE_ETH_FOREACH_DEV(pt_id) {
2365 printf("\nShutting down port %d...\n", pt_id);
2371 * This is a workaround to fix a virtio-user issue that
2372 * requires calling the clean-up routine to remove existing
2374 * This workaround is valid only for testpmd; it needs a fix
2375 * valid for all applications.
2376 * TODO: Implement proper resource cleanup
2378 device = rte_eth_devices[pt_id].device;
2379 if (device && !strcmp(device->driver->name, "net_virtio_user"))
2385 ret = rte_dev_event_monitor_stop();
2388 "fail to stop device event monitor.");
2392 ret = rte_dev_event_callback_unregister(NULL,
2393 eth_dev_event_callback, NULL);
2396 "fail to unregister device event callback.\n");
2400 ret = rte_dev_hotplug_handle_disable();
2403 "fail to disable hotplug handling.\n");
2408 printf("\nBye...\n");
2411 typedef void (*cmd_func_t)(void);
2412 struct pmd_test_command {
2413 const char *cmd_name;
2414 cmd_func_t cmd_func;
2417 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2419 /* Check the link status of all ports in up to 9s, and finally print the status */
2421 check_all_ports_link_status(uint32_t port_mask)
2423 #define CHECK_INTERVAL 100 /* 100ms */
2424 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2426 uint8_t count, all_ports_up, print_flag = 0;
2427 struct rte_eth_link link;
2429 printf("Checking link statuses...\n");
2431 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2433 RTE_ETH_FOREACH_DEV(portid) {
2434 if ((port_mask & (1 << portid)) == 0)
2436 memset(&link, 0, sizeof(link));
2437 rte_eth_link_get_nowait(portid, &link);
2438 /* print link status if flag set */
2439 if (print_flag == 1) {
2440 if (link.link_status)
2442 "Port%d Link Up. speed %u Mbps- %s\n",
2443 portid, link.link_speed,
2444 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2445 ("full-duplex") : ("half-duplex\n"));
2447 printf("Port %d Link Down\n", portid);
2450 /* clear all_ports_up flag if any link down */
2451 if (link.link_status == ETH_LINK_DOWN) {
2456 /* after finally printing all link status, get out */
2457 if (print_flag == 1)
2460 if (all_ports_up == 0) {
2462 rte_delay_ms(CHECK_INTERVAL);
2465 /* set the print_flag if all ports up or timeout */
2466 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2476 rmv_event_callback(void *arg)
2478 int need_to_start = 0;
2479 int org_no_link_check = no_link_check;
2480 portid_t port_id = (intptr_t)arg;
2482 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2484 if (!test_done && port_is_forwarding(port_id)) {
2486 stop_packet_forwarding();
2490 no_link_check = org_no_link_check;
2491 close_port(port_id);
2492 detach_port(port_id);
2494 start_packet_forwarding(0);
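/*
 * The removal handler above is deliberately conservative: if forwarding
 * is active on the removed port it pauses forwarding, closes and detaches
 * the port with link checking temporarily suppressed, and restarts
 * forwarding only if it had stopped it itself.
 */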
2497 /* This function is used by the interrupt thread */
2499 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2502 static const char * const event_desc[] = {
2503 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2504 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2505 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2506 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2507 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2508 [RTE_ETH_EVENT_IPSEC] = "IPsec",
2509 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2510 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2511 [RTE_ETH_EVENT_NEW] = "device probed",
2512 [RTE_ETH_EVENT_DESTROY] = "device released",
2513 [RTE_ETH_EVENT_MAX] = NULL,
2516 RTE_SET_USED(param);
2517 RTE_SET_USED(ret_param);
2519 if (type >= RTE_ETH_EVENT_MAX) {
2520 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2521 port_id, __func__, type);
2523 } else if (event_print_mask & (UINT32_C(1) << type)) {
2524 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2529 if (port_id_is_invalid(port_id, DISABLED_WARN))
2533 case RTE_ETH_EVENT_INTR_RMV:
2534 if (rte_eal_alarm_set(100000,
2535 rmv_event_callback, (void *)(intptr_t)port_id))
2536 fprintf(stderr, "Could not set up deferred device removal\n");
2544 /* This function is used by the interrupt thread */
2546 eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2547 __rte_unused void *arg)
2552 if (type >= RTE_DEV_EVENT_MAX) {
2553 fprintf(stderr, "%s called upon invalid event %d\n",
2559 case RTE_DEV_EVENT_REMOVE:
2560 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2562 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2564 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
2568 rmv_event_callback((void *)(intptr_t)port_id);
2570 case RTE_DEV_EVENT_ADD:
2571 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2573 /* TODO: After the kernel driver binding finishes,
2574 * begin to attach the port.
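/*
 * The queue-stats mapping tables consumed below are filled from the
 * --tx-queue-stats-mapping and --rx-queue-stats-mapping command-line
 * options (option names assumed from the testpmd parameter parser);
 * each (port, queue, counter) entry binds one queue to one hardware
 * statistics counter via rte_eth_dev_set_{tx,rx}_queue_stats_mapping().
 */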
2583 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2587 uint8_t mapping_found = 0;
2589 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2590 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2591 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2592 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2593 tx_queue_stats_mappings[i].queue_id,
2594 tx_queue_stats_mappings[i].stats_counter_id);
2601 port->tx_queue_stats_mapping_enabled = 1;
2606 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2610 uint8_t mapping_found = 0;
2612 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2613 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2614 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2615 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2616 rx_queue_stats_mappings[i].queue_id,
2617 rx_queue_stats_mappings[i].stats_counter_id);
2624 port->rx_queue_stats_mapping_enabled = 1;
2629 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2633 diag = set_tx_queue_stats_mapping_registers(pi, port);
2635 if (diag == -ENOTSUP) {
2636 port->tx_queue_stats_mapping_enabled = 0;
2637 printf("TX queue stats mapping not supported port id=%d\n", pi);
2640 rte_exit(EXIT_FAILURE,
2641 "set_tx_queue_stats_mapping_registers "
2642 "failed for port id=%d diag=%d\n",
2646 diag = set_rx_queue_stats_mapping_registers(pi, port);
2648 if (diag == -ENOTSUP) {
2649 port->rx_queue_stats_mapping_enabled = 0;
2650 printf("RX queue stats mapping not supported port id=%d\n", pi);
2653 rte_exit(EXIT_FAILURE,
2654 "set_rx_queue_stats_mapping_registers "
2655 "failed for port id=%d diag=%d\n",
2661 rxtx_port_config(struct rte_port *port)
2665 for (qid = 0; qid < nb_rxq; qid++) {
2666 port->rx_conf[qid] = port->dev_info.default_rxconf;
2668 /* Check if any Rx parameters have been passed */
2669 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2670 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2672 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2673 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2675 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2676 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2678 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2679 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2681 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2682 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2684 port->nb_rx_desc[qid] = nb_rxd;
2687 for (qid = 0; qid < nb_txq; qid++) {
2688 port->tx_conf[qid] = port->dev_info.default_txconf;
2690 /* Check if any Tx parameters have been passed */
2691 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2692 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2694 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2695 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2697 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2698 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2700 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2701 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2703 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2704 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2706 port->nb_tx_desc[qid] = nb_txd;
2711 init_port_config(void)
2714 struct rte_port *port;
2716 RTE_ETH_FOREACH_DEV(pid) {
2718 port->dev_conf.fdir_conf = fdir_conf;
2719 rte_eth_dev_info_get(pid, &port->dev_info);
2721 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2722 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2723 rss_hf & port->dev_info.flow_type_rss_offloads;
2725 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2726 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2729 if (port->dcb_flag == 0) {
2730 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2731 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2733 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2736 rxtx_port_config(port);
2738 rte_eth_macaddr_get(pid, &port->eth_addr);
2740 map_port_queue_stats_mapping_registers(pid, port);
2741 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2742 rte_pmd_ixgbe_bypass_init(pid);
2745 if (lsc_interrupt &&
2746 (rte_eth_devices[pid].data->dev_flags &
2747 RTE_ETH_DEV_INTR_LSC))
2748 port->dev_conf.intr_conf.lsc = 1;
2749 if (rmv_interrupt &&
2750 (rte_eth_devices[pid].data->dev_flags &
2751 RTE_ETH_DEV_INTR_RMV))
2752 port->dev_conf.intr_conf.rmv = 1;
2756 void set_port_slave_flag(portid_t slave_pid)
2758 struct rte_port *port;
2760 port = &ports[slave_pid];
2761 port->slave_flag = 1;
2764 void clear_port_slave_flag(portid_t slave_pid)
2766 struct rte_port *port;
2768 port = &ports[slave_pid];
2769 port->slave_flag = 0;
2772 uint8_t port_is_bonding_slave(portid_t slave_pid)
2774 struct rte_port *port;
2776 port = &ports[slave_pid];
2777 if ((rte_eth_devices[slave_pid].data->dev_flags &
2778 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
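/*
 * In DCB+VT mode each entry of vlan_tags[] below is mapped to one VMDq
 * pool: with ETH_4_TCS there are 32 pools, so tag vlan_tags[i] lands in
 * pool (i % 32); with ETH_8_TCS there are 16 pools and the modulus is 16
 * (see the pool_map loop in get_eth_dcb_conf() below).
 */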
2783 const uint16_t vlan_tags[] = {
2784 0, 1, 2, 3, 4, 5, 6, 7,
2785 8, 9, 10, 11, 12, 13, 14, 15,
2786 16, 17, 18, 19, 20, 21, 22, 23,
2787 24, 25, 26, 27, 28, 29, 30, 31
2791 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2792 enum dcb_mode_enable dcb_mode,
2793 enum rte_eth_nb_tcs num_tcs,
2798 struct rte_eth_rss_conf rss_conf;
2801 * Build up the correct configuration for DCB+VT based on the VLAN tags
2802 * array given above and the number of traffic classes available for use.
2804 if (dcb_mode == DCB_VT_ENABLED) {
2805 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2806 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2807 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2808 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2810 /* VMDQ+DCB RX and TX configurations */
2811 vmdq_rx_conf->enable_default_pool = 0;
2812 vmdq_rx_conf->default_pool = 0;
2813 vmdq_rx_conf->nb_queue_pools =
2814 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2815 vmdq_tx_conf->nb_queue_pools =
2816 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2818 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2819 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2820 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2821 vmdq_rx_conf->pool_map[i].pools =
2822 1 << (i % vmdq_rx_conf->nb_queue_pools);
2824 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2825 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2826 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2829 /* Set the DCB multi-queue mode for RX and TX. */
2830 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2831 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2833 struct rte_eth_dcb_rx_conf *rx_conf =
2834 &eth_conf->rx_adv_conf.dcb_rx_conf;
2835 struct rte_eth_dcb_tx_conf *tx_conf =
2836 &eth_conf->tx_adv_conf.dcb_tx_conf;
2838 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2842 rx_conf->nb_tcs = num_tcs;
2843 tx_conf->nb_tcs = num_tcs;
2845 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2846 rx_conf->dcb_tc[i] = i % num_tcs;
2847 tx_conf->dcb_tc[i] = i % num_tcs;
2850 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2851 eth_conf->rx_adv_conf.rss_conf = rss_conf;
2852 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2856 eth_conf->dcb_capability_en =
2857 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2859 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
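/*
 * get_eth_dcb_conf() thus yields one of two multi-queue layouts:
 * VMDQ+DCB (ETH_MQ_RX_VMDQ_DCB / ETH_MQ_TX_VMDQ_DCB) when VT is enabled,
 * or plain DCB combined with RSS (ETH_MQ_RX_DCB_RSS / ETH_MQ_TX_DCB)
 * otherwise; priority flow control is advertised in dcb_capability_en
 * only when pfc_en is set.
 */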
2865 init_port_dcb_config(portid_t pid,
2866 enum dcb_mode_enable dcb_mode,
2867 enum rte_eth_nb_tcs num_tcs,
2870 struct rte_eth_conf port_conf;
2871 struct rte_port *rte_port;
2875 rte_port = &ports[pid];
2877 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2878 /* Enter DCB configuration status */
2881 port_conf.rxmode = rte_port->dev_conf.rxmode;
2882 port_conf.txmode = rte_port->dev_conf.txmode;
2884 /* Set the configuration of DCB in VT mode and DCB in non-VT mode. */
2885 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2888 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2890 /* Re-configure the device. */
2891 rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
2893 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2895 /* If dev_info.vmdq_pool_base is greater than 0,
2896 * the queue IDs of the VMDq pools start after the PF queues.
2898 if (dcb_mode == DCB_VT_ENABLED &&
2899 rte_port->dev_info.vmdq_pool_base > 0) {
2900 printf("VMDQ_DCB multi-queue mode is nonsensical"
2901 " for port %d.", pid);
2905 /* Assume that all ports in testpmd have the same DCB capability
2906 * and the same number of Rx and Tx queues in DCB mode.
2908 if (dcb_mode == DCB_VT_ENABLED) {
2909 if (rte_port->dev_info.max_vfs > 0) {
2910 nb_rxq = rte_port->dev_info.nb_rx_queues;
2911 nb_txq = rte_port->dev_info.nb_tx_queues;
2913 nb_rxq = rte_port->dev_info.max_rx_queues;
2914 nb_txq = rte_port->dev_info.max_tx_queues;
2917 /* If VT is disabled, use all PF queues. */
2918 if (rte_port->dev_info.vmdq_pool_base == 0) {
2919 nb_rxq = rte_port->dev_info.max_rx_queues;
2920 nb_txq = rte_port->dev_info.max_tx_queues;
2922 nb_rxq = (queueid_t)num_tcs;
2923 nb_txq = (queueid_t)num_tcs;
2927 rx_free_thresh = 64;
2929 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2931 rxtx_port_config(rte_port);
2933 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2934 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2935 rx_vft_set(pid, vlan_tags[i], 1);
2937 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2938 map_port_queue_stats_mapping_registers(pid, rte_port);
2940 rte_port->dcb_flag = 1;
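/*
 * A rough interactive sketch (command syntax assumed from the testpmd
 * command line): DCB is normally (re)configured on a stopped port, e.g.
 *
 *	testpmd> port stop 0
 *	testpmd> port config 0 dcb vt off 4 pfc on
 *	testpmd> port start 0
 *
 * which would reach init_port_dcb_config(0, DCB_ENABLED, ETH_4_TCS, 1).
 */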
2948 /* Configuration of Ethernet ports. */
2949 ports = rte_zmalloc("testpmd: ports",
2950 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2951 RTE_CACHE_LINE_SIZE);
2952 if (ports == NULL) {
2953 rte_exit(EXIT_FAILURE,
2954 "rte_zmalloc(%d struct rte_port) failed\n",
2958 /* Initialize ports NUMA structures */
2959 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2960 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2961 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2975 const char clr[] = { 27, '[', '2', 'J', '\0' };
2976 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2978 /* Clear screen and move to top left */
2979 printf("%s%s", clr, top_left);
2981 printf("\nPort statistics ====================================");
2982 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2983 nic_stats_display(fwd_ports_ids[i]);
2987 signal_handler(int signum)
2989 if (signum == SIGINT || signum == SIGTERM) {
2990 printf("\nSignal %d received, preparing to exit...\n",
2992 #ifdef RTE_LIBRTE_PDUMP
2993 /* uninitialize packet capture framework */
2996 #ifdef RTE_LIBRTE_LATENCY_STATS
2997 rte_latencystats_uninit();
3000 /* Set flag to indicate forced termination. */
3002 /* exit with the expected status */
3003 signal(signum, SIG_DFL);
3004 kill(getpid(), signum);
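/*
 * Restoring SIG_DFL and re-raising the signal is a common idiom: once the
 * capture and latency-stats frameworks have been torn down above, the
 * process terminates with the conventional "killed by signal" status
 * rather than a plain exit code.
 */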
3009 main(int argc, char** argv)
3016 signal(SIGINT, signal_handler);
3017 signal(SIGTERM, signal_handler);
3019 diag = rte_eal_init(argc, argv);
3021 rte_panic("Cannot init EAL\n");
3023 testpmd_logtype = rte_log_register("testpmd");
3024 if (testpmd_logtype < 0)
3025 rte_panic("Cannot register log type");
3026 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3028 #ifdef RTE_LIBRTE_PDUMP
3029 /* initialize packet capture framework */
3030 rte_pdump_init(NULL);
3034 RTE_ETH_FOREACH_DEV(port_id) {
3035 ports_ids[count] = port_id;
3038 nb_ports = (portid_t) count;
3040 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3042 /* allocate port structures, and init them */
3045 set_def_fwd_config();
3047 rte_panic("Empty set of forwarding logical cores - check the "
3048 "core mask supplied in the command parameters\n");
3050 /* Bitrate/latency stats disabled by default */
3051 #ifdef RTE_LIBRTE_BITRATE
3052 bitrate_enabled = 0;
3054 #ifdef RTE_LIBRTE_LATENCY_STATS
3055 latencystats_enabled = 0;
3058 /* on FreeBSD, mlockall() is disabled by default */
3059 #ifdef RTE_EXEC_ENV_BSDAPP
3068 launch_args_parse(argc, argv);
3070 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3071 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3075 if (tx_first && interactive)
3076 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3077 "interactive mode.\n");
3079 if (tx_first && lsc_interrupt) {
3080 printf("Warning: lsc_interrupt needs to be off when "
3081 " using tx_first. Disabling.\n");
3085 if (!nb_rxq && !nb_txq)
3086 printf("Warning: Either rx or tx queues should be non-zero\n");
3088 if (nb_rxq > 1 && nb_rxq > nb_txq)
3089 printf("Warning: nb_rxq=%d enables RSS configuration, "
3090 "but nb_txq=%d will prevent to fully test it.\n",
3096 ret = rte_dev_hotplug_handle_enable();
3099 "fail to enable hotplug handling.");
3103 ret = rte_dev_event_monitor_start();
3106 "fail to start device event monitoring.");
3110 ret = rte_dev_event_callback_register(NULL,
3111 eth_dev_event_callback, NULL);
3114 "fail to register device event callback\n");
3119 if (start_port(RTE_PORT_ALL) != 0)
3120 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3122 /* set all ports to promiscuous mode by default */
3123 RTE_ETH_FOREACH_DEV(port_id)
3124 rte_eth_promiscuous_enable(port_id);
3126 /* Init metrics library */
3127 rte_metrics_init(rte_socket_id());
3129 #ifdef RTE_LIBRTE_LATENCY_STATS
3130 if (latencystats_enabled != 0) {
3131 int ret = rte_latencystats_init(1, NULL);
3133 printf("Warning: latencystats init()"
3134 " returned error %d\n", ret);
3135 printf("Latencystats running on lcore %d\n",
3136 latencystats_lcore_id);
3140 /* Setup bitrate stats */
3141 #ifdef RTE_LIBRTE_BITRATE
3142 if (bitrate_enabled != 0) {
3143 bitrate_data = rte_stats_bitrate_create();
3144 if (bitrate_data == NULL)
3145 rte_exit(EXIT_FAILURE,
3146 "Could not allocate bitrate data.\n");
3147 rte_stats_bitrate_reg(bitrate_data);
3151 #ifdef RTE_LIBRTE_CMDLINE
3152 if (strlen(cmdline_filename) != 0)
3153 cmdline_read_from_file(cmdline_filename);
3155 if (interactive == 1) {
3157 printf("Start automatic packet forwarding\n");
3158 start_packet_forwarding(0);
3170 printf("No commandline core given, start packet forwarding\n");
3171 start_packet_forwarding(tx_first);
3172 if (stats_period != 0) {
3173 uint64_t prev_time = 0, cur_time, diff_time = 0;
3174 uint64_t timer_period;
3176 /* Convert to number of cycles */
3177 timer_period = stats_period * rte_get_timer_hz();
3179 while (f_quit == 0) {
3180 cur_time = rte_get_timer_cycles();
3181 diff_time += cur_time - prev_time;
3183 if (diff_time >= timer_period) {
3185 /* Reset the timer */
3188 /* Sleep to avoid unnecessary checks */
3189 prev_time = cur_time;
3194 printf("Press enter to exit\n");
3195 rc = read(0, &c, 1);