1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc_heap.h>
31 #include <rte_memory.h>
32 #include <rte_memcpy.h>
33 #include <rte_launch.h>
35 #include <rte_alarm.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_atomic.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
50 #ifdef RTE_LIBRTE_IXGBE_PMD
51 #include <rte_pmd_ixgbe.h>
53 #ifdef RTE_LIBRTE_PDUMP
54 #include <rte_pdump.h>
57 #include <rte_metrics.h>
58 #ifdef RTE_LIBRTE_BITRATE
59 #include <rte_bitrate.h>
61 #ifdef RTE_LIBRTE_LATENCY_STATS
62 #include <rte_latencystats.h>
68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69 #define HUGE_FLAG (0x40000)
71 #define HUGE_FLAG MAP_HUGETLB
74 #ifndef MAP_HUGE_SHIFT
75 /* older kernels (or FreeBSD) will not have this define */
76 #define HUGE_SHIFT (26)
78 #define HUGE_SHIFT MAP_HUGE_SHIFT
81 #define EXTMEM_HEAP_NAME "extmem"
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* use master core for command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, the NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports among the CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
102 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
117 * Store the specified sockets on which the memory pools used by the ports are allocated.
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123 * Store the specified sockets on which the RX rings used by the ports are allocated.
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129 * Store the specified sockets on which the TX rings used by the ports are allocated.
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet addresses of peer target ports to which packets are forwarded.
137 * Must be instantiated with the Ethernet addresses of peer traffic generator ports.
140 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
182 #if defined RTE_LIBRTE_PMD_SOFTNIC
185 #ifdef RTE_LIBRTE_IEEE1588
186 &ieee1588_fwd_engine,
191 struct fwd_config cur_fwd_config;
192 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193 uint32_t retry_enabled;
194 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
197 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
199 * specified on command-line. */
200 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
203 * In a container, the process running with the 'stats-period' option cannot be
204 * terminated normally. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
209 * Configuration of packet segments used by the "txonly" processing engine.
211 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213 TXONLY_DEF_PACKET_LEN,
215 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
217 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
218 /**< Split policy for packets to TX. */
220 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
221 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
223 /* whether the current configuration is in DCB mode; 0 means not in DCB mode */
224 uint8_t dcb_config = 0;
226 /* Whether DCB is in testing status */
227 uint8_t dcb_test = 0;
230 * Configurable number of RX/TX queues.
232 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
233 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
236 * Configurable number of RX/TX ring descriptors.
237 * Defaults are supplied by drivers via ethdev.
239 #define RTE_TEST_RX_DESC_DEFAULT 0
240 #define RTE_TEST_TX_DESC_DEFAULT 0
241 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
242 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
244 #define RTE_PMD_PARAM_UNSET -1
246 * Configurable values of RX and TX ring threshold registers.
249 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
250 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
251 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
253 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
254 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
255 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
258 * Configurable value of RX free threshold.
260 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
263 * Configurable value of RX drop enable.
265 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
268 * Configurable value of TX free threshold.
270 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
273 * Configurable value of TX RS bit threshold.
275 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
278 * Configurable number of packets buffered before sending.
280 uint16_t noisy_tx_sw_bufsz;
283 * Configurable value of packet buffer timeout.
285 uint16_t noisy_tx_sw_buf_flush_time;
288 * Configurable value for size of VNF internal memory area
289 * used for simulating noisy neighbour behaviour
291 uint64_t noisy_lkup_mem_sz;
294 * Configurable value of number of random writes done in
295 * VNF simulation memory area.
297 uint64_t noisy_lkup_num_writes;
300 * Configurable value of number of random reads done in
301 * VNF simulation memory area.
303 uint64_t noisy_lkup_num_reads;
306 * Configurable value of number of random reads/writes done in
307 * VNF simulation memory area.
309 uint64_t noisy_lkup_num_reads_writes;
312 * Receive Side Scaling (RSS) configuration.
314 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
317 * Port topology configuration
319 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
322 * Avoid flushing all the RX streams before starting forwarding.
324 uint8_t no_flush_rx = 0; /* flush by default */
327 * Flow API isolated mode.
329 uint8_t flow_isolate_all;
332 * Avoid checking the link status when starting/stopping a port.
334 uint8_t no_link_check = 0; /* check by default */
337 * Enable link status change notification
339 uint8_t lsc_interrupt = 1; /* enabled by default */
342 * Enable device removal notification.
344 uint8_t rmv_interrupt = 1; /* enabled by default */
346 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
349 * Display or mask ether events
350 * Default to all events except VF_MBOX
352 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
353 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
354 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
355 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
356 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
357 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
358 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
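
/*
 * Editorial sketch (not part of the original testpmd source): the mask is
 * consulted with a per-event-type bit test, exactly as eth_event_callback()
 * does further below; VF_MBOX is the only event type left out of the
 * default mask above.
 */
static inline int
event_printing_enabled(enum rte_eth_event_type type)
{
	return (event_print_mask & (UINT32_C(1) << type)) != 0;
}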
360 * Decide if all memory is locked for performance.
365 * NIC bypass mode configuration options.
368 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
369 /* The NIC bypass watchdog timeout. */
370 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
374 #ifdef RTE_LIBRTE_LATENCY_STATS
377 * Set when latency stats are enabled on the command line.
379 uint8_t latencystats_enabled;
382 * Lcore ID to serve latency statistics.
384 lcoreid_t latencystats_lcore_id = -1;
389 * Ethernet device configuration.
391 struct rte_eth_rxmode rx_mode = {
392 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
395 struct rte_eth_txmode tx_mode = {
396 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
399 struct rte_fdir_conf fdir_conf = {
400 .mode = RTE_FDIR_MODE_NONE,
401 .pballoc = RTE_FDIR_PBALLOC_64K,
402 .status = RTE_FDIR_REPORT_STATUS,
404 .vlan_tci_mask = 0xEFFF,
406 .src_ip = 0xFFFFFFFF,
407 .dst_ip = 0xFFFFFFFF,
410 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
411 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
413 .src_port_mask = 0xFFFF,
414 .dst_port_mask = 0xFFFF,
415 .mac_addr_byte_mask = 0xFF,
416 .tunnel_type_mask = 1,
417 .tunnel_id_mask = 0xFFFFFFFF,
422 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
424 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
425 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
427 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
428 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
430 uint16_t nb_tx_queue_stats_mappings = 0;
431 uint16_t nb_rx_queue_stats_mappings = 0;
434 * Display zero values by default for xstats
436 uint8_t xstats_hide_zero;
438 unsigned int num_sockets = 0;
439 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
441 #ifdef RTE_LIBRTE_BITRATE
442 /* Bitrate statistics */
443 struct rte_stats_bitrates *bitrate_data;
444 lcoreid_t bitrate_lcore_id;
445 uint8_t bitrate_enabled;
448 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
449 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
451 struct vxlan_encap_conf vxlan_encap_conf = {
454 .vni = "\x00\x00\x00",
456 .udp_dst = RTE_BE16(4789),
457 .ipv4_src = IPv4(127, 0, 0, 1),
458 .ipv4_dst = IPv4(255, 255, 255, 255),
459 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
460 "\x00\x00\x00\x00\x00\x00\x00\x01",
461 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
462 "\x00\x00\x00\x00\x00\x00\x11\x11",
464 .eth_src = "\x00\x00\x00\x00\x00\x00",
465 .eth_dst = "\xff\xff\xff\xff\xff\xff",
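
/*
 * Editorial note: 4789 is the IANA-assigned VXLAN UDP port; the VNI, IP and
 * MAC values above are placeholder defaults intended to be overridden from
 * the testpmd command line before building encapsulation flow rules.
 */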
468 struct nvgre_encap_conf nvgre_encap_conf = {
471 .tni = "\x00\x00\x00",
472 .ipv4_src = IPv4(127, 0, 0, 1),
473 .ipv4_dst = IPv4(255, 255, 255, 255),
474 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
475 "\x00\x00\x00\x00\x00\x00\x00\x01",
476 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
477 "\x00\x00\x00\x00\x00\x00\x11\x11",
479 .eth_src = "\x00\x00\x00\x00\x00\x00",
480 .eth_dst = "\xff\xff\xff\xff\xff\xff",
483 /* Forward function declarations */
484 static void map_port_queue_stats_mapping_registers(portid_t pi,
485 struct rte_port *port);
486 static void check_all_ports_link_status(uint32_t port_mask);
487 static int eth_event_callback(portid_t port_id,
488 enum rte_eth_event_type type,
489 void *param, void *ret_param);
490 static void eth_dev_event_callback(char *device_name,
491 enum rte_dev_event_type type,
493 static int eth_dev_event_callback_register(void);
494 static int eth_dev_event_callback_unregister(void);
498 * Check if all the ports are started.
499 * If yes, return positive value. If not, return zero.
501 static int all_ports_started(void);
503 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
504 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
507 * Helper function to check if a socket ID has not been discovered yet.
508 * If new, return a positive value. If already discovered, return zero.
511 new_socket_id(unsigned int socket_id)
515 for (i = 0; i < num_sockets; i++) {
516 if (socket_ids[i] == socket_id)
523 * Setup default configuration.
526 set_default_fwd_lcores_config(void)
530 unsigned int sock_num;
533 for (i = 0; i < RTE_MAX_LCORE; i++) {
534 if (!rte_lcore_is_enabled(i))
536 sock_num = rte_lcore_to_socket_id(i);
537 if (new_socket_id(sock_num)) {
538 if (num_sockets >= RTE_MAX_NUMA_NODES) {
539 rte_exit(EXIT_FAILURE,
540 "Total sockets greater than %u\n",
543 socket_ids[num_sockets++] = sock_num;
545 if (i == rte_get_master_lcore())
547 fwd_lcores_cpuids[nb_lc++] = i;
549 nb_lcores = (lcoreid_t) nb_lc;
550 nb_cfg_lcores = nb_lcores;
555 set_def_peer_eth_addrs(void)
559 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
560 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
561 peer_eth_addrs[i].addr_bytes[5] = i;
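
/*
 * Editorial note: with ETHER_LOCAL_ADMIN_ADDR (0x02) in byte 0, the default
 * peer address of port i is the locally administered MAC 02:00:00:00:00:<i>.
 */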
566 set_default_fwd_ports_config(void)
571 RTE_ETH_FOREACH_DEV(pt_id)
572 fwd_ports_ids[i++] = pt_id;
574 nb_cfg_ports = nb_ports;
575 nb_fwd_ports = nb_ports;
579 set_def_fwd_config(void)
581 set_default_fwd_lcores_config();
582 set_def_peer_eth_addrs();
583 set_default_fwd_ports_config();
586 /* extremely pessimistic estimation of memory required to create a mempool */
588 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
590 unsigned int n_pages, mbuf_per_pg, leftover;
591 uint64_t total_mem, mbuf_mem, obj_sz;
593 /* there is no good way to predict how much space the mempool will
594 * occupy because it will allocate chunks on the fly, and some of those
595 * will come from default DPDK memory while some will come from our
596 * external memory, so just assume 128MB will be enough for everyone.
598 uint64_t hdr_mem = 128 << 20;
600 /* account for possible non-contiguousness */
601 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
603 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
607 mbuf_per_pg = pgsz / obj_sz;
608 leftover = (nb_mbufs % mbuf_per_pg) > 0;
609 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
611 mbuf_mem = n_pages * pgsz;
613 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
615 if (total_mem > SIZE_MAX) {
616 TESTPMD_LOG(ERR, "Memory size too big\n");
619 *out = (size_t)total_mem;
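
/*
 * Worked example (editorial, illustrative numbers): with 2MB pages and an
 * object size of roughly 2.5KB, one page holds floor(2MB / 2.5KB) = 819
 * mbufs, so 180K mbufs need ceil(180000 / 819) = 220 pages (~440MB), plus
 * the pessimistic 128MB header reserve, all rounded up to the page size.
 */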
624 static inline uint32_t
627 return (uint32_t)__builtin_ctzll(v);
630 static inline uint32_t
635 v = rte_align64pow2(v);
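
/*
 * Editorial note: rte_align64pow2() rounds v up to the next power of two,
 * so counting the trailing zeros of the result (the helper above) yields
 * ceil(log2(v)); e.g. v = 2MB gives 21.
 */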
640 pagesz_flags(uint64_t page_sz)
642 /* as per the mmap() manpage, page size flags are the log2 of the page size
643 * shifted by MAP_HUGE_SHIFT
645 int log2 = log2_u64(page_sz);
647 return (log2 << HUGE_SHIFT);
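
/*
 * Worked example (editorial): for 2MB hugepages, log2_u64(RTE_PGSIZE_2M)
 * is 21, so the value returned here is 21 << MAP_HUGE_SHIFT, which is the
 * MAP_HUGE_2MB flag value on Linux.
 */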
651 alloc_mem(size_t memsz, size_t pgsz, bool huge)
656 /* allocate anonymous hugepages */
657 flags = MAP_ANONYMOUS | MAP_PRIVATE;
659 flags |= HUGE_FLAG | pagesz_flags(pgsz);
661 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
662 if (addr == MAP_FAILED)
668 struct extmem_param {
672 rte_iova_t *iova_table;
673 unsigned int iova_table_len;
677 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
680 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
681 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
682 unsigned int cur_page, n_pages, pgsz_idx;
683 size_t mem_sz, cur_pgsz;
684 rte_iova_t *iovas = NULL;
688 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
689 /* skip anything that is too big */
690 if (pgsizes[pgsz_idx] > SIZE_MAX)
693 cur_pgsz = pgsizes[pgsz_idx];
695 /* if we were told not to allocate hugepages, override */
697 cur_pgsz = sysconf(_SC_PAGESIZE);
699 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
701 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
705 /* allocate our memory */
706 addr = alloc_mem(mem_sz, cur_pgsz, huge);
708 /* if we couldn't allocate memory with a specified page size,
709 * that doesn't mean we can't do it with other page sizes, so try another one.
715 /* store IOVA addresses for every page in this memory area */
716 n_pages = mem_sz / cur_pgsz;
718 iovas = malloc(sizeof(*iovas) * n_pages);
721 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
724 /* lock memory if it's not huge pages */
728 /* populate IOVA addresses */
729 for (cur_page = 0; cur_page < n_pages; cur_page++) {
734 offset = cur_pgsz * cur_page;
735 cur = RTE_PTR_ADD(addr, offset);
737 /* touch the page before getting its IOVA */
738 *(volatile char *)cur = 0;
740 iova = rte_mem_virt2iova(cur);
742 iovas[cur_page] = iova;
747 /* if we couldn't allocate anything */
753 param->pgsz = cur_pgsz;
754 param->iova_table = iovas;
755 param->iova_table_len = n_pages;
762 munmap(addr, mem_sz);
768 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
770 struct extmem_param param;
773 memset(¶m, 0, sizeof(param));
775 /* check if our heap exists */
776 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
778 /* create our heap */
779 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
781 TESTPMD_LOG(ERR, "Cannot create heap\n");
786 ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge);
788 TESTPMD_LOG(ERR, "Cannot create memory area\n");
792 /* we now have a valid memory area, so add it to heap */
793 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
794 param.addr, param.len, param.iova_table,
795 param.iova_table_len, param.pgsz);
797 /* when using VFIO, memory is automatically mapped for DMA by EAL */
799 /* not needed any more */
800 free(param.iova_table);
803 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
804 munmap(param.addr, param.len);
810 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
817 * Configuration initialisation done once at init time.
820 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
821 unsigned int socket_id)
823 char pool_name[RTE_MEMPOOL_NAMESIZE];
824 struct rte_mempool *rte_mp = NULL;
827 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
828 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
831 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
832 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
834 switch (mp_alloc_type) {
835 case MP_ALLOC_NATIVE:
837 /* wrapper to rte_mempool_create() */
838 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
839 rte_mbuf_best_mempool_ops());
840 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
841 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
846 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
847 mb_size, (unsigned int) mb_mempool_cache,
848 sizeof(struct rte_pktmbuf_pool_private),
853 if (rte_mempool_populate_anon(rte_mp) == 0) {
854 rte_mempool_free(rte_mp);
858 rte_pktmbuf_pool_init(rte_mp, NULL);
859 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
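
/*
 * Editorial note: rte_mempool_populate_anon() returns the number of objects
 * populated, so the zero check above treats an empty result as failure and
 * frees the half-built pool; on success the pool is finished with the usual
 * pktmbuf init callbacks, mirroring what rte_pktmbuf_pool_create() does.
 */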
863 case MP_ALLOC_XMEM_HUGE:
866 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
868 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
869 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
872 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
874 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
876 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
877 rte_mbuf_best_mempool_ops());
878 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
879 mb_mempool_cache, 0, mbuf_seg_size,
885 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
890 if (rte_mp == NULL) {
891 rte_exit(EXIT_FAILURE,
892 "Creation of mbuf pool for socket %u failed: %s\n",
893 socket_id, rte_strerror(rte_errno));
894 } else if (verbose_level > 0) {
895 rte_mempool_dump(stdout, rte_mp);
900 * Check whether the given socket ID is valid in NUMA mode.
901 * Return 0 if valid, -1 otherwise.
904 check_socket_id(const unsigned int socket_id)
906 static int warning_once = 0;
908 if (new_socket_id(socket_id)) {
909 if (!warning_once && numa_support)
910 printf("Warning: NUMA should be configured manually by"
911 " using --port-numa-config and"
912 " --ring-numa-config parameters along with"
921 * Get the allowed maximum number of RX queues.
922 * *pid returns the port ID which has the minimal value of
923 * max_rx_queues among all ports.
926 get_allowed_max_nb_rxq(portid_t *pid)
928 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
930 struct rte_eth_dev_info dev_info;
932 RTE_ETH_FOREACH_DEV(pi) {
933 rte_eth_dev_info_get(pi, &dev_info);
934 if (dev_info.max_rx_queues < allowed_max_rxq) {
935 allowed_max_rxq = dev_info.max_rx_queues;
939 return allowed_max_rxq;
943 * Check whether the given rxq is valid.
944 * It is valid if it does not exceed the maximum number
945 * of RX queues of any port.
946 * Return 0 if valid, -1 otherwise.
949 check_nb_rxq(queueid_t rxq)
951 queueid_t allowed_max_rxq;
954 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
955 if (rxq > allowed_max_rxq) {
956 printf("Fail: input rxq (%u) can't be greater "
957 "than max_rx_queues (%u) of port %u\n",
967 * Get the allowed maximum number of TX queues.
968 * *pid returns the port ID which has the minimal value of
969 * max_tx_queues among all ports.
972 get_allowed_max_nb_txq(portid_t *pid)
974 queueid_t allowed_max_txq = MAX_QUEUE_ID;
976 struct rte_eth_dev_info dev_info;
978 RTE_ETH_FOREACH_DEV(pi) {
979 rte_eth_dev_info_get(pi, &dev_info);
980 if (dev_info.max_tx_queues < allowed_max_txq) {
981 allowed_max_txq = dev_info.max_tx_queues;
985 return allowed_max_txq;
989 * Check whether the given txq is valid.
990 * It is valid if it does not exceed the maximum number
991 * of TX queues of any port.
992 * Return 0 if valid, -1 otherwise.
995 check_nb_txq(queueid_t txq)
997 queueid_t allowed_max_txq;
1000 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1001 if (txq > allowed_max_txq) {
1002 printf("Fail: input txq (%u) can't be greater "
1003 "than max_tx_queues (%u) of port %u\n",
1016 struct rte_port *port;
1017 struct rte_mempool *mbp;
1018 unsigned int nb_mbuf_per_pool;
1020 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1021 struct rte_gro_param gro_param;
1025 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1028 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
1029 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
1030 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
1033 /* Configuration of logical cores. */
1034 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1035 sizeof(struct fwd_lcore *) * nb_lcores,
1036 RTE_CACHE_LINE_SIZE);
1037 if (fwd_lcores == NULL) {
1038 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1039 "failed\n", nb_lcores);
1041 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1042 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1043 sizeof(struct fwd_lcore),
1044 RTE_CACHE_LINE_SIZE);
1045 if (fwd_lcores[lc_id] == NULL) {
1046 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1049 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1052 RTE_ETH_FOREACH_DEV(pid) {
1054 /* Apply default TxRx configuration for all ports */
1055 port->dev_conf.txmode = tx_mode;
1056 port->dev_conf.rxmode = rx_mode;
1057 rte_eth_dev_info_get(pid, &port->dev_info);
1059 if (!(port->dev_info.tx_offload_capa &
1060 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1061 port->dev_conf.txmode.offloads &=
1062 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1064 if (port_numa[pid] != NUMA_NO_CONFIG)
1065 port_per_socket[port_numa[pid]]++;
1067 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1069 /* if socket_id is invalid, set to 0 */
1070 if (check_socket_id(socket_id) < 0)
1072 port_per_socket[socket_id]++;
1076 /* Apply Rx offloads configuration */
1077 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1078 port->rx_conf[k].offloads =
1079 port->dev_conf.rxmode.offloads;
1080 /* Apply Tx offloads configuration */
1081 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1082 port->tx_conf[k].offloads =
1083 port->dev_conf.txmode.offloads;
1085 /* set flag to initialize port/queue */
1086 port->need_reconfig = 1;
1087 port->need_reconfig_queues = 1;
1091 * Create pools of mbufs.
1092 * If NUMA support is disabled, create a single pool of mbufs in
1093 * socket 0 memory by default.
1094 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
1096 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
1097 * nb_txd can be re-configured at run time.
1099 if (param_total_num_mbufs)
1100 nb_mbuf_per_pool = param_total_num_mbufs;
1102 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1103 (nb_lcores * mb_mempool_cache) +
1104 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1105 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
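
/*
 * Editorial note on the sizing above: without --total-num-mbufs, each pool
 * covers the per-port worst case -- a full RX ring, a full TX ring, one
 * packet burst in flight and one mempool cache per forwarding lcore --
 * scaled by RTE_MAX_ETHPORTS since the pool may be shared by every port.
 */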
1111 for (i = 0; i < num_sockets; i++)
1112 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1115 if (socket_num == UMA_NO_CONFIG)
1116 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
1118 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1124 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1125 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1127 * Record which mbuf pool each logical core uses, if needed.
1129 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1130 mbp = mbuf_pool_find(
1131 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1134 mbp = mbuf_pool_find(0);
1135 fwd_lcores[lc_id]->mbp = mbp;
1136 /* initialize GSO context */
1137 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1138 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1139 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1140 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
1142 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1145 /* Configuration of packet forwarding streams. */
1146 if (init_fwd_streams() < 0)
1147 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1151 /* create a gro context for each lcore */
1152 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1153 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1154 gro_param.max_item_per_flow = MAX_PKT_BURST;
1155 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1156 gro_param.socket_id = rte_lcore_to_socket_id(
1157 fwd_lcores_cpuids[lc_id]);
1158 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1159 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1160 rte_exit(EXIT_FAILURE,
1161 "rte_gro_ctx_create() failed\n");
1165 #if defined RTE_LIBRTE_PMD_SOFTNIC
1166 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1167 RTE_ETH_FOREACH_DEV(pid) {
1169 const char *driver = port->dev_info.driver_name;
1171 if (strcmp(driver, "net_softnic") == 0)
1172 port->softport.fwd_lcore_arg = fwd_lcores;
1181 reconfig(portid_t new_port_id, unsigned socket_id)
1183 struct rte_port *port;
1185 /* Reconfiguration of Ethernet ports. */
1186 port = &ports[new_port_id];
1187 rte_eth_dev_info_get(new_port_id, &port->dev_info);
1189 /* set flag to initialize port/queue */
1190 port->need_reconfig = 1;
1191 port->need_reconfig_queues = 1;
1192 port->socket_id = socket_id;
1199 init_fwd_streams(void)
1202 struct rte_port *port;
1203 streamid_t sm_id, nb_fwd_streams_new;
1206 /* set the socket id according to whether NUMA is enabled */
1207 RTE_ETH_FOREACH_DEV(pid) {
1209 if (nb_rxq > port->dev_info.max_rx_queues) {
1210 printf("Fail: nb_rxq(%d) is greater than "
1211 "max_rx_queues(%d)\n", nb_rxq,
1212 port->dev_info.max_rx_queues);
1215 if (nb_txq > port->dev_info.max_tx_queues) {
1216 printf("Fail: nb_txq(%d) is greater than "
1217 "max_tx_queues(%d)\n", nb_txq,
1218 port->dev_info.max_tx_queues);
1222 if (port_numa[pid] != NUMA_NO_CONFIG)
1223 port->socket_id = port_numa[pid];
1225 port->socket_id = rte_eth_dev_socket_id(pid);
1227 /* if socket_id is invalid, set to 0 */
1228 if (check_socket_id(port->socket_id) < 0)
1229 port->socket_id = 0;
1233 if (socket_num == UMA_NO_CONFIG)
1234 port->socket_id = 0;
1236 port->socket_id = socket_num;
1240 q = RTE_MAX(nb_rxq, nb_txq);
1242 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1245 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1246 if (nb_fwd_streams_new == nb_fwd_streams)
1249 if (fwd_streams != NULL) {
1250 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1251 if (fwd_streams[sm_id] == NULL)
1253 rte_free(fwd_streams[sm_id]);
1254 fwd_streams[sm_id] = NULL;
1256 rte_free(fwd_streams);
1261 nb_fwd_streams = nb_fwd_streams_new;
1262 if (nb_fwd_streams) {
1263 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1264 sizeof(struct fwd_stream *) * nb_fwd_streams,
1265 RTE_CACHE_LINE_SIZE);
1266 if (fwd_streams == NULL)
1267 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1268 " (struct fwd_stream *)) failed\n",
1271 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1272 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1273 " struct fwd_stream", sizeof(struct fwd_stream),
1274 RTE_CACHE_LINE_SIZE);
1275 if (fwd_streams[sm_id] == NULL)
1276 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1277 "(struct fwd_stream) failed\n");
1284 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1286 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1288 unsigned int total_burst;
1289 unsigned int nb_burst;
1290 unsigned int burst_stats[3];
1291 uint16_t pktnb_stats[3];
1293 int burst_percent[3];
1296 * First compute the total number of packet bursts and the
1297 * two highest numbers of bursts of the same number of packets.
1300 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1301 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1302 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1303 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1306 total_burst += nb_burst;
1307 if (nb_burst > burst_stats[0]) {
1308 burst_stats[1] = burst_stats[0];
1309 pktnb_stats[1] = pktnb_stats[0];
1310 burst_stats[0] = nb_burst;
1311 pktnb_stats[0] = nb_pkt;
1312 } else if (nb_burst > burst_stats[1]) {
1313 burst_stats[1] = nb_burst;
1314 pktnb_stats[1] = nb_pkt;
1317 if (total_burst == 0)
1319 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1320 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1321 burst_percent[0], (int) pktnb_stats[0]);
1322 if (burst_stats[0] == total_burst) {
1326 if (burst_stats[0] + burst_stats[1] == total_burst) {
1327 printf(" + %d%% of %d pkts]\n",
1328 100 - burst_percent[0], pktnb_stats[1]);
1331 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1332 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1333 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1334 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1337 printf(" + %d%% of %d pkts + %d%% of others]\n",
1338 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1340 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1343 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
1345 struct rte_port *port;
1348 static const char *fwd_stats_border = "----------------------";
1350 port = &ports[port_id];
1351 printf("\n %s Forward statistics for port %-2d %s\n",
1352 fwd_stats_border, port_id, fwd_stats_border);
1354 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
1355 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1357 stats->ipackets, stats->imissed,
1358 (uint64_t) (stats->ipackets + stats->imissed));
1360 if (cur_fwd_eng == &csum_fwd_engine)
1361 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
1362 port->rx_bad_ip_csum, port->rx_bad_l4_csum,
1363 port->rx_bad_outer_l4_csum);
1364 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1365 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
1366 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
1369 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1371 stats->opackets, port->tx_dropped,
1372 (uint64_t) (stats->opackets + port->tx_dropped));
1375 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
1377 stats->ipackets, stats->imissed,
1378 (uint64_t) (stats->ipackets + stats->imissed));
1380 if (cur_fwd_eng == &csum_fwd_engine)
1381 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64" Bad-outer-l4csum: %-14"PRIu64"\n",
1382 port->rx_bad_ip_csum, port->rx_bad_l4_csum,
1383 port->rx_bad_outer_l4_csum);
1384 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1385 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
1386 printf(" RX-nombufs: %14"PRIu64"\n",
1390 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
1392 stats->opackets, port->tx_dropped,
1393 (uint64_t) (stats->opackets + port->tx_dropped));
1396 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1397 if (port->rx_stream)
1398 pkt_burst_stats_display("RX",
1399 &port->rx_stream->rx_burst_stats);
1400 if (port->tx_stream)
1401 pkt_burst_stats_display("TX",
1402 &port->tx_stream->tx_burst_stats);
1405 if (port->rx_queue_stats_mapping_enabled) {
1407 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1408 printf(" Stats reg %2d RX-packets:%14"PRIu64
1409 " RX-errors:%14"PRIu64
1410 " RX-bytes:%14"PRIu64"\n",
1411 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1415 if (port->tx_queue_stats_mapping_enabled) {
1416 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1417 printf(" Stats reg %2d TX-packets:%14"PRIu64
1418 " TX-bytes:%14"PRIu64"\n",
1419 i, stats->q_opackets[i], stats->q_obytes[i]);
1423 printf(" %s--------------------------------%s\n",
1424 fwd_stats_border, fwd_stats_border);
1428 fwd_stream_stats_display(streamid_t stream_id)
1430 struct fwd_stream *fs;
1431 static const char *fwd_top_stats_border = "-------";
1433 fs = fwd_streams[stream_id];
1434 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1435 (fs->fwd_dropped == 0))
1437 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1438 "TX Port=%2d/Queue=%2d %s\n",
1439 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1440 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1441 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1442 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1444 /* if checksum mode */
1445 if (cur_fwd_eng == &csum_fwd_engine) {
1446 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
1447 "%-14u Rx- bad outer L4 checksum: %-14u\n",
1448 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1449 fs->rx_bad_outer_l4_csum);
1452 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1453 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1454 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1459 flush_fwd_rx_queues(void)
1461 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1468 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1469 uint64_t timer_period;
1471 /* convert to number of cycles */
1472 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1474 for (j = 0; j < 2; j++) {
1475 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1476 for (rxq = 0; rxq < nb_rxq; rxq++) {
1477 port_id = fwd_ports_ids[rxp];
1479 * testpmd can get stuck in the do-while loop below
1480 * if rte_eth_rx_burst() always returns nonzero
1481 * packets, so a timer is added to exit this loop
1482 * after the 1-second timer expires.
1484 prev_tsc = rte_rdtsc();
1486 nb_rx = rte_eth_rx_burst(port_id, rxq,
1487 pkts_burst, MAX_PKT_BURST);
1488 for (i = 0; i < nb_rx; i++)
1489 rte_pktmbuf_free(pkts_burst[i]);
1491 cur_tsc = rte_rdtsc();
1492 diff_tsc = cur_tsc - prev_tsc;
1493 timer_tsc += diff_tsc;
1494 } while ((nb_rx > 0) &&
1495 (timer_tsc < timer_period));
1499 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
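
/*
 * Editorial note: the queues are drained in two passes (the j < 2 loop
 * above) with a 10 ms pause in between, so packets still in flight after
 * the first pass are caught by the second; timer_period (one second in
 * TSC cycles) bounds each per-queue drain loop.
 */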
1504 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1506 struct fwd_stream **fsm;
1509 #ifdef RTE_LIBRTE_BITRATE
1510 uint64_t tics_per_1sec;
1511 uint64_t tics_datum;
1512 uint64_t tics_current;
1513 uint16_t i, cnt_ports;
1515 cnt_ports = nb_ports;
1516 tics_datum = rte_rdtsc();
1517 tics_per_1sec = rte_get_timer_hz();
1519 fsm = &fwd_streams[fc->stream_idx];
1520 nb_fs = fc->stream_nb;
1522 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1523 (*pkt_fwd)(fsm[sm_id]);
1524 #ifdef RTE_LIBRTE_BITRATE
1525 if (bitrate_enabled != 0 &&
1526 bitrate_lcore_id == rte_lcore_id()) {
1527 tics_current = rte_rdtsc();
1528 if (tics_current - tics_datum >= tics_per_1sec) {
1529 /* Periodic bitrate calculation */
1530 for (i = 0; i < cnt_ports; i++)
1531 rte_stats_bitrate_calc(bitrate_data,
1533 tics_datum = tics_current;
1537 #ifdef RTE_LIBRTE_LATENCY_STATS
1538 if (latencystats_enabled != 0 &&
1539 latencystats_lcore_id == rte_lcore_id())
1540 rte_latencystats_update();
1543 } while (! fc->stopped);
1547 start_pkt_forward_on_core(void *fwd_arg)
1549 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1550 cur_fwd_config.fwd_eng->packet_fwd);
1555 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1556 * Used to start communication flows in network loopback test configurations.
1559 run_one_txonly_burst_on_core(void *fwd_arg)
1561 struct fwd_lcore *fwd_lc;
1562 struct fwd_lcore tmp_lcore;
1564 fwd_lc = (struct fwd_lcore *) fwd_arg;
1565 tmp_lcore = *fwd_lc;
1566 tmp_lcore.stopped = 1;
1567 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
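
/*
 * Editorial note: copying the lcore context and pre-setting stopped = 1
 * makes the do/while loop in run_pkt_fwd_on_lcore() execute exactly once,
 * so each stream transmits a single burst and the call returns immediately.
 */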
1572 * Launch packet forwarding:
1573 * - Setup per-port forwarding context.
1574 * - launch logical cores with their forwarding configuration.
1577 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1579 port_fwd_begin_t port_fwd_begin;
1584 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1585 if (port_fwd_begin != NULL) {
1586 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1587 (*port_fwd_begin)(fwd_ports_ids[i]);
1589 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1590 lc_id = fwd_lcores_cpuids[i];
1591 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1592 fwd_lcores[i]->stopped = 0;
1593 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1594 fwd_lcores[i], lc_id);
1596 printf("launch lcore %u failed - diag=%d\n",
1603 * Update the forward ports list.
1606 update_fwd_ports(portid_t new_pid)
1609 unsigned int new_nb_fwd_ports = 0;
1612 for (i = 0; i < nb_fwd_ports; ++i) {
1613 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1616 fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1620 if (new_pid < RTE_MAX_ETHPORTS)
1621 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1623 nb_fwd_ports = new_nb_fwd_ports;
1624 nb_cfg_ports = new_nb_fwd_ports;
1628 * Launch packet forwarding configuration.
1631 start_packet_forwarding(int with_tx_first)
1633 port_fwd_begin_t port_fwd_begin;
1634 port_fwd_end_t port_fwd_end;
1635 struct rte_port *port;
1640 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1641 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1643 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1644 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1646 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1647 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1648 (!nb_rxq || !nb_txq))
1649 rte_exit(EXIT_FAILURE,
1650 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1651 cur_fwd_eng->fwd_mode_name);
1653 if (all_ports_started() == 0) {
1654 printf("Not all ports were started\n");
1657 if (test_done == 0) {
1658 printf("Packet forwarding already started\n");
1664 for (i = 0; i < nb_fwd_ports; i++) {
1665 pt_id = fwd_ports_ids[i];
1666 port = &ports[pt_id];
1667 if (!port->dcb_flag) {
1668 printf("In DCB mode, all forwarding ports must "
1669 "be configured in this mode.\n");
1673 if (nb_fwd_lcores == 1) {
1674 printf("In DCB mode,the nb forwarding cores "
1675 "should be larger than 1.\n");
1684 flush_fwd_rx_queues();
1686 pkt_fwd_config_display(&cur_fwd_config);
1687 rxtx_config_display();
1689 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1690 pt_id = fwd_ports_ids[i];
1691 port = &ports[pt_id];
1692 rte_eth_stats_get(pt_id, &port->stats);
1693 port->tx_dropped = 0;
1695 map_port_queue_stats_mapping_registers(pt_id, port);
1697 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1698 fwd_streams[sm_id]->rx_packets = 0;
1699 fwd_streams[sm_id]->tx_packets = 0;
1700 fwd_streams[sm_id]->fwd_dropped = 0;
1701 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1702 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1703 fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;
1705 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1706 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1707 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1708 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1709 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1711 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1712 fwd_streams[sm_id]->core_cycles = 0;
1715 if (with_tx_first) {
1716 port_fwd_begin = tx_only_engine.port_fwd_begin;
1717 if (port_fwd_begin != NULL) {
1718 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1719 (*port_fwd_begin)(fwd_ports_ids[i]);
1721 while (with_tx_first--) {
1722 launch_packet_forwarding(
1723 run_one_txonly_burst_on_core);
1724 rte_eal_mp_wait_lcore();
1726 port_fwd_end = tx_only_engine.port_fwd_end;
1727 if (port_fwd_end != NULL) {
1728 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1729 (*port_fwd_end)(fwd_ports_ids[i]);
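
/*
 * Editorial note: with_tx_first is a count rather than a boolean -- each
 * iteration of the loop above launches one txonly burst on every forwarding
 * core and waits for all cores, before the configured forwarding engine is
 * started below.
 */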
1732 launch_packet_forwarding(start_pkt_forward_on_core);
1736 stop_packet_forwarding(void)
1738 struct rte_eth_stats stats;
1739 struct rte_port *port;
1740 port_fwd_end_t port_fwd_end;
1745 uint64_t total_recv;
1746 uint64_t total_xmit;
1747 uint64_t total_rx_dropped;
1748 uint64_t total_tx_dropped;
1749 uint64_t total_rx_nombuf;
1750 uint64_t tx_dropped;
1751 uint64_t rx_bad_ip_csum;
1752 uint64_t rx_bad_l4_csum;
1753 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1754 uint64_t fwd_cycles;
1757 static const char *acc_stats_border = "+++++++++++++++";
1760 printf("Packet forwarding not started\n");
1763 printf("Telling cores to stop...");
1764 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1765 fwd_lcores[lc_id]->stopped = 1;
1766 printf("\nWaiting for lcores to finish...\n");
1767 rte_eal_mp_wait_lcore();
1768 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1769 if (port_fwd_end != NULL) {
1770 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1771 pt_id = fwd_ports_ids[i];
1772 (*port_fwd_end)(pt_id);
1775 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1778 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1779 if (cur_fwd_config.nb_fwd_streams >
1780 cur_fwd_config.nb_fwd_ports) {
1781 fwd_stream_stats_display(sm_id);
1782 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1783 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1785 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1787 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1790 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1791 tx_dropped = (uint64_t) (tx_dropped +
1792 fwd_streams[sm_id]->fwd_dropped);
1793 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1796 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1797 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1798 fwd_streams[sm_id]->rx_bad_ip_csum);
1799 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1803 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1804 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1805 fwd_streams[sm_id]->rx_bad_l4_csum);
1806 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1809 ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
1810 fwd_streams[sm_id]->rx_bad_outer_l4_csum;
1812 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1813 fwd_cycles = (uint64_t) (fwd_cycles +
1814 fwd_streams[sm_id]->core_cycles);
1819 total_rx_dropped = 0;
1820 total_tx_dropped = 0;
1821 total_rx_nombuf = 0;
1822 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1823 pt_id = fwd_ports_ids[i];
1825 port = &ports[pt_id];
1826 rte_eth_stats_get(pt_id, &stats);
1827 stats.ipackets -= port->stats.ipackets;
1828 port->stats.ipackets = 0;
1829 stats.opackets -= port->stats.opackets;
1830 port->stats.opackets = 0;
1831 stats.ibytes -= port->stats.ibytes;
1832 port->stats.ibytes = 0;
1833 stats.obytes -= port->stats.obytes;
1834 port->stats.obytes = 0;
1835 stats.imissed -= port->stats.imissed;
1836 port->stats.imissed = 0;
1837 stats.oerrors -= port->stats.oerrors;
1838 port->stats.oerrors = 0;
1839 stats.rx_nombuf -= port->stats.rx_nombuf;
1840 port->stats.rx_nombuf = 0;
1842 total_recv += stats.ipackets;
1843 total_xmit += stats.opackets;
1844 total_rx_dropped += stats.imissed;
1845 total_tx_dropped += port->tx_dropped;
1846 total_rx_nombuf += stats.rx_nombuf;
1848 fwd_port_stats_display(pt_id, &stats);
1851 printf("\n %s Accumulated forward statistics for all ports"
1853 acc_stats_border, acc_stats_border);
1854 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1856 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1858 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1859 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1860 if (total_rx_nombuf > 0)
1861 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1862 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1864 acc_stats_border, acc_stats_border);
1865 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1867 printf("\n CPU cycles/packet=%u (total cycles="
1868 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1869 (unsigned int)(fwd_cycles / total_recv),
1870 fwd_cycles, total_recv);
1872 printf("\nDone.\n");
1877 dev_set_link_up(portid_t pid)
1879 if (rte_eth_dev_set_link_up(pid) < 0)
1880 printf("\nSet link up fail.\n");
1884 dev_set_link_down(portid_t pid)
1886 if (rte_eth_dev_set_link_down(pid) < 0)
1887 printf("\nSet link down fail.\n");
1891 all_ports_started(void)
1894 struct rte_port *port;
1896 RTE_ETH_FOREACH_DEV(pi) {
1898 /* Check if there is a port which is not started */
1899 if ((port->port_status != RTE_PORT_STARTED) &&
1900 (port->slave_flag == 0))
1904 /* All ports are started */
1909 port_is_stopped(portid_t port_id)
1911 struct rte_port *port = &ports[port_id];
1913 if ((port->port_status != RTE_PORT_STOPPED) &&
1914 (port->slave_flag == 0))
1920 all_ports_stopped(void)
1924 RTE_ETH_FOREACH_DEV(pi) {
1925 if (!port_is_stopped(pi))
1933 port_is_started(portid_t port_id)
1935 if (port_id_is_invalid(port_id, ENABLED_WARN))
1938 if (ports[port_id].port_status != RTE_PORT_STARTED)
1945 port_is_closed(portid_t port_id)
1947 if (port_id_is_invalid(port_id, ENABLED_WARN))
1950 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1957 start_port(portid_t pid)
1959 int diag, need_check_link_status = -1;
1962 struct rte_port *port;
1963 struct ether_addr mac_addr;
1964 enum rte_eth_event_type event_type;
1966 if (port_id_is_invalid(pid, ENABLED_WARN))
1971 RTE_ETH_FOREACH_DEV(pi) {
1972 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1975 need_check_link_status = 0;
1977 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1978 RTE_PORT_HANDLING) == 0) {
1979 printf("Port %d is now not stopped\n", pi);
1983 if (port->need_reconfig > 0) {
1984 port->need_reconfig = 0;
1986 if (flow_isolate_all) {
1987 int ret = port_flow_isolate(pi, 1);
1989 printf("Failed to apply isolated"
1990 " mode on port %d\n", pi);
1995 printf("Configuring Port %d (socket %u)\n", pi,
1997 /* configure port */
1998 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
2001 if (rte_atomic16_cmpset(&(port->port_status),
2002 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2003 printf("Port %d can not be set back "
2004 "to stopped\n", pi);
2005 printf("Fail to configure port %d\n", pi);
2006 /* try to reconfigure port next time */
2007 port->need_reconfig = 1;
2011 if (port->need_reconfig_queues > 0) {
2012 port->need_reconfig_queues = 0;
2013 /* setup tx queues */
2014 for (qi = 0; qi < nb_txq; qi++) {
2015 if ((numa_support) &&
2016 (txring_numa[pi] != NUMA_NO_CONFIG))
2017 diag = rte_eth_tx_queue_setup(pi, qi,
2018 port->nb_tx_desc[qi],
2020 &(port->tx_conf[qi]));
2022 diag = rte_eth_tx_queue_setup(pi, qi,
2023 port->nb_tx_desc[qi],
2025 &(port->tx_conf[qi]));
2030 /* Failed to set up a tx queue, return */
2031 if (rte_atomic16_cmpset(&(port->port_status),
2033 RTE_PORT_STOPPED) == 0)
2034 printf("Port %d can not be set back "
2035 "to stopped\n", pi);
2036 printf("Fail to configure port %d tx queues\n",
2038 /* try to reconfigure queues next time */
2039 port->need_reconfig_queues = 1;
2042 for (qi = 0; qi < nb_rxq; qi++) {
2043 /* setup rx queues */
2044 if ((numa_support) &&
2045 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2046 struct rte_mempool * mp =
2047 mbuf_pool_find(rxring_numa[pi]);
2049 printf("Failed to setup RX queue:"
2050 "No mempool allocation"
2051 " on the socket %d\n",
2056 diag = rte_eth_rx_queue_setup(pi, qi,
2057 port->nb_rx_desc[qi],
2059 &(port->rx_conf[qi]),
2062 struct rte_mempool *mp =
2063 mbuf_pool_find(port->socket_id);
2065 printf("Failed to setup RX queue:"
2066 "No mempool allocation"
2067 " on the socket %d\n",
2071 diag = rte_eth_rx_queue_setup(pi, qi,
2072 port->nb_rx_desc[qi],
2074 &(port->rx_conf[qi]),
2080 /* Failed to set up an rx queue, return */
2081 if (rte_atomic16_cmpset(&(port->port_status),
2083 RTE_PORT_STOPPED) == 0)
2084 printf("Port %d can not be set back "
2085 "to stopped\n", pi);
2086 printf("Fail to configure port %d rx queues\n",
2088 /* try to reconfigure queues next time */
2089 port->need_reconfig_queues = 1;
2095 if (rte_eth_dev_start(pi) < 0) {
2096 printf("Fail to start port %d\n", pi);
2098 /* Failed to start the port, return */
2099 if (rte_atomic16_cmpset(&(port->port_status),
2100 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2101 printf("Port %d can not be set back to "
2106 if (rte_atomic16_cmpset(&(port->port_status),
2107 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2108 printf("Port %d can not be set into started\n", pi);
2110 rte_eth_macaddr_get(pi, &mac_addr);
2111 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2112 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2113 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2114 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2116 /* at least one port started, need to check link status */
2117 need_check_link_status = 1;
2120 for (event_type = RTE_ETH_EVENT_UNKNOWN;
2121 event_type < RTE_ETH_EVENT_MAX;
2123 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
2128 printf("Failed to setup even callback for event %d\n",
2134 if (need_check_link_status == 1 && !no_link_check)
2135 check_all_ports_link_status(RTE_PORT_ALL);
2136 else if (need_check_link_status == 0)
2137 printf("Please stop the ports first\n");
2144 stop_port(portid_t pid)
2147 struct rte_port *port;
2148 int need_check_link_status = 0;
2155 if (port_id_is_invalid(pid, ENABLED_WARN))
2158 printf("Stopping ports...\n");
2160 RTE_ETH_FOREACH_DEV(pi) {
2161 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2164 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2165 printf("Please remove port %d from forwarding configuration.\n", pi);
2169 if (port_is_bonding_slave(pi)) {
2170 printf("Please remove port %d from bonded device.\n", pi);
2175 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2176 RTE_PORT_HANDLING) == 0)
2179 rte_eth_dev_stop(pi);
2181 if (rte_atomic16_cmpset(&(port->port_status),
2182 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2183 printf("Port %d can not be set into stopped\n", pi);
2184 need_check_link_status = 1;
2186 if (need_check_link_status && !no_link_check)
2187 check_all_ports_link_status(RTE_PORT_ALL);
2193 close_port(portid_t pid)
2196 struct rte_port *port;
2198 if (port_id_is_invalid(pid, ENABLED_WARN))
2201 printf("Closing ports...\n");
2203 RTE_ETH_FOREACH_DEV(pi) {
2204 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2207 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2208 printf("Please remove port %d from forwarding configuration.\n", pi);
2212 if (port_is_bonding_slave(pi)) {
2213 printf("Please remove port %d from bonded device.\n", pi);
2218 if (rte_atomic16_cmpset(&(port->port_status),
2219 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2220 printf("Port %d is already closed\n", pi);
2224 if (rte_atomic16_cmpset(&(port->port_status),
2225 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2226 printf("Port %d is now not stopped\n", pi);
2230 if (port->flow_list)
2231 port_flow_flush(pi);
2232 rte_eth_dev_close(pi);
2234 if (rte_atomic16_cmpset(&(port->port_status),
2235 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2236 printf("Port %d cannot be set to closed\n", pi);
2243 reset_port(portid_t pid)
2247 struct rte_port *port;
2249 if (port_id_is_invalid(pid, ENABLED_WARN))
2252 printf("Resetting ports...\n");
2254 RTE_ETH_FOREACH_DEV(pi) {
2255 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2258 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2259 printf("Please remove port %d from forwarding "
2260 "configuration.\n", pi);
2264 if (port_is_bonding_slave(pi)) {
2265 printf("Please remove port %d from bonded device.\n",
2270 diag = rte_eth_dev_reset(pi);
2273 port->need_reconfig = 1;
2274 port->need_reconfig_queues = 1;
2276 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2284 eth_dev_event_callback_register(void)
2288 /* register the device event callback */
2289 ret = rte_dev_event_callback_register(NULL,
2290 eth_dev_event_callback, NULL);
2292 printf("Failed to register device event callback\n");
2301 eth_dev_event_callback_unregister(void)
2305 /* unregister the device event callback */
2306 ret = rte_dev_event_callback_unregister(NULL,
2307 eth_dev_event_callback, NULL);
2309 printf("Failed to unregister device event callback\n");
2317 attach_port(char *identifier)
2320 unsigned int socket_id;
2322 printf("Attaching a new port...\n");
2324 if (identifier == NULL) {
2325 printf("Invalid parameters are specified\n");
2329 if (rte_eth_dev_attach(identifier, &pi))
2332 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2333 /* if socket_id is invalid, set to 0 */
2334 if (check_socket_id(socket_id) < 0)
2336 reconfig(pi, socket_id);
2337 rte_eth_promiscuous_enable(pi);
2339 ports_ids[nb_ports] = pi;
2340 nb_ports = rte_eth_dev_count_avail();
2342 ports[pi].port_status = RTE_PORT_STOPPED;
2344 update_fwd_ports(pi);
2346 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2351 detach_port(portid_t port_id)
2353 char name[RTE_ETH_NAME_MAX_LEN];
2356 printf("Detaching a port...\n");
2358 if (!port_is_closed(port_id)) {
2359 printf("Please close port first\n");
2363 if (ports[port_id].flow_list)
2364 port_flow_flush(port_id);
2366 if (rte_eth_dev_detach(port_id, name)) {
2367 TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2371 for (i = 0; i < nb_ports; i++) {
2372 if (ports_ids[i] == port_id) {
2373 ports_ids[i] = ports_ids[nb_ports-1];
2374 ports_ids[nb_ports-1] = 0;
2378 nb_ports = rte_eth_dev_count_avail();
2380 update_fwd_ports(RTE_MAX_ETHPORTS);
2382 printf("Port %u is detached. Now total ports is %d\n",
2391 struct rte_device *device;
2396 stop_packet_forwarding();
2398 if (ports != NULL) {
2400 RTE_ETH_FOREACH_DEV(pt_id) {
2401 printf("\nShutting down port %d...\n", pt_id);
2407 * This is a workaround to fix a virtio-user issue that
2408 * requires calling the clean-up routine to remove the existing socket.
2410 * This workaround is valid only for testpmd; a fix
2411 * valid for all applications is needed.
2412 * TODO: Implement proper resource cleanup
2414 device = rte_eth_devices[pt_id].device;
2415 if (device && !strcmp(device->driver->name, "net_virtio_user"))
2421 ret = rte_dev_event_monitor_stop();
2424 "fail to stop device event monitor.");
2426 ret = eth_dev_event_callback_unregister();
2429 "fail to unregister all event callbacks.");
2432 printf("\nBye...\n");
2435 typedef void (*cmd_func_t)(void);
2436 struct pmd_test_command {
2437 const char *cmd_name;
2438 cmd_func_t cmd_func;
2441 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2443 /* Check the link status of all ports in up to 9s, and finally print the status */
2445 check_all_ports_link_status(uint32_t port_mask)
2447 #define CHECK_INTERVAL 100 /* 100ms */
2448 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2450 uint8_t count, all_ports_up, print_flag = 0;
2451 struct rte_eth_link link;
2453 printf("Checking link statuses...\n");
2455 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2457 RTE_ETH_FOREACH_DEV(portid) {
2458 if ((port_mask & (1 << portid)) == 0)
2460 memset(&link, 0, sizeof(link));
2461 rte_eth_link_get_nowait(portid, &link);
2462 /* print link status if flag set */
2463 if (print_flag == 1) {
2464 if (link.link_status)
2466 "Port%d Link Up. speed %u Mbps- %s\n",
2467 portid, link.link_speed,
2468 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2469 ("full-duplex") : ("half-duplex\n"));
2471 printf("Port %d Link Down\n", portid);
2474 /* clear all_ports_up flag if any link down */
2475 if (link.link_status == ETH_LINK_DOWN) {
2480 /* after finally printing all link status, get out */
2481 if (print_flag == 1)
2484 if (all_ports_up == 0) {
2486 rte_delay_ms(CHECK_INTERVAL);
2489 /* set the print_flag if all ports up or timeout */
2490 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
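
/*
 * Illustrative sketch, not part of the original file: a single-port
 * variant of the polling loop above, reusing CHECK_INTERVAL and the same
 * rte_eth_link_get_nowait() call.
 */
static __rte_unused int
wait_for_link_up(portid_t portid, unsigned int max_ms)
{
	struct rte_eth_link link;
	unsigned int ms;

	for (ms = 0; ms < max_ms; ms += CHECK_INTERVAL) {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(portid, &link);
		if (link.link_status == ETH_LINK_UP)
			return 0;
		rte_delay_ms(CHECK_INTERVAL);
	}
	return -1;	/* timed out */
}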
static void
rmv_event_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	detach_port(port_id);
	if (need_to_start)
		start_packet_forwarding(0);
}
/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		  void *ret_param)
{
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_IPSEC] = "IPsec",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_NEW] = "device probed",
		[RTE_ETH_EVENT_DESTROY] = "device released",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			event_desc[type]);
		fflush(stdout);
	}

	if (port_id_is_invalid(port_id, DISABLED_WARN))
		return 0;

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		/* defer the actual removal out of interrupt context */
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}
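
/*
 * Illustrative sketch, not part of the original file: registering the
 * callback above for one event type on every port at once. RTE_ETH_ALL
 * is the ethdev wildcard port id.
 */
static __rte_unused void
register_lsc_callback_example(void)
{
	if (rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC, eth_event_callback, NULL))
		fprintf(stderr, "Could not register LSC event callback\n");
}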
/* This function is used by the interrupt thread */
static void
eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
		       __rte_unused void *arg)
{
	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
			device_name);
		/* TODO: After finishing failure handling, begin to stop
		 * packet forwarding, stop the port, close the port and
		 * detach the port.
		 */
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finishing kernel driver binding,
		 * begin to attach the port.
		 */
		break;
	default:
		break;
	}
}
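
/*
 * Illustrative sketch, not part of the original file: the registration
 * sequence that pairs with the callback above. A NULL device name
 * subscribes to events from all devices; the event monitor thread must
 * be started separately.
 */
static __rte_unused int
hotplug_monitor_example(void)
{
	int ret;

	ret = rte_dev_event_callback_register(NULL,
			eth_dev_event_callback, NULL);
	if (ret != 0)
		return ret;
	return rte_dev_event_monitor_start();
}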
static int
set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}
static int
set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}
static void
map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	uint16_t qid;

	for (qid = 0; qid < nb_rxq; qid++) {
		port->rx_conf[qid] = port->dev_info.default_rxconf;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		port->tx_conf[qid] = port->dev_info.default_txconf;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}
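
/*
 * Illustrative sketch, not part of the original file: the same
 * "PMD default, then selective override" pattern that rxtx_port_config()
 * uses above, applied at setup time for a single Rx queue.
 */
static __rte_unused int
setup_one_rxq_example(portid_t pid, uint16_t qid, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;

	rte_eth_dev_info_get(pid, &dev_info);
	rxq_conf = dev_info.default_rxconf;	/* start from PMD defaults */
	rxq_conf.rx_drop_en = 1;		/* override one knob */
	return rte_eth_rx_queue_setup(pid, qid, nb_rxd,
			rte_eth_dev_socket_id(pid), &rxq_conf, mp);
}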
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;
		rte_eth_dev_info_get(pid, &port->dev_info);
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	if ((rte_eth_devices[slave_pid].data->dev_flags &
	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
		return 1;
	return 0;
}
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Build up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
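
/*
 * Worked example for the VMDq+DCB branch above (illustrative, not part
 * of the original file): with num_tcs == ETH_4_TCS there are 32 pools,
 * so VLAN tag vlan_tags[5] == 5 maps to pool 5 (pools = 1 << (5 % 32))
 * and user priority 6 maps to traffic class 6 % 4 == 2.
 */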
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	port_conf.rxmode = rte_port->dev_conf.rxmode;
	port_conf.txmode = rte_port->dev_conf.txmode;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/* re-configure the device */
	rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
}
static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left (ANSI escapes ESC[2J, ESC[1;1H) */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
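
/*
 * Illustrative sketch, not part of the original file: the "restore
 * default disposition and re-raise" idiom used by signal_handler()
 * above, which lets the parent shell observe the real terminating
 * signal rather than a plain exit code.
 */
static __rte_unused void
reraise_signal_example(int signum)
{
	signal(signum, SIG_DFL);	/* drop our handler */
	kill(getpid(), signum);		/* deliver the signal again */
}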
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_BSDAPP
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				"using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent fully testing it.\n",
		       nb_rxq, nb_txq);
	init_config();

	if (hot_plug) {
		/* enable hot plug monitoring */
		ret = rte_dev_event_monitor_start();
		if (ret) {
			rte_errno = EINVAL;
			return -1;
		}
		eth_dev_event_callback_register();
	}

	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");
	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());
#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif
#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();
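
			/*
			 * Worked example (illustrative, not in the
			 * original): with --stats-period 2 on a 2.5 GHz
			 * timer, timer_period = 2 * 2500000000
			 * = 5000000000 cycles, so the loop below
			 * refreshes the stats every ~2 s.
			 */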
			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}