/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_malloc_heap.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
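
/*
 * For illustration only, a condensed sketch of how this knob is consumed by
 * mbuf_pool_create() further below (see its switch statement):
 *
 *   switch (mp_alloc_type) {
 *   case MP_ALLOC_NATIVE: rte_pktmbuf_pool_create() on a DPDK socket; break;
 *   case MP_ALLOC_ANON:   rte_mempool_create_empty() +
 *                         rte_mempool_populate_anon();                break;
 *   case MP_ALLOC_XMEM:
 *   case MP_ALLOC_XMEM_HUGE: setup_extmem() + pool on "extmem" heap;  break;
 *   }
 */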
/* Socket on which the mempool used by each port is allocated. */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/* Socket on which the RX rings of each port are allocated. */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/* Socket on which the TX rings of each port are allocated. */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
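
/*
 * Example: with 2 forwarding ports and RTE_MAX(nb_rxq, nb_txq) == 4,
 * init_fwd_streams() below allocates 2 * 4 = 8 forwarding streams.
 */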
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
    &io_fwd_engine,
    &mac_fwd_engine,
    &mac_swap_engine,
    &flow_gen_engine,
    &rx_only_engine,
    &tx_only_engine,
    &csum_fwd_engine,
    &icmp_echo_engine,
    &noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
    &softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
    &ieee1588_fwd_engine,
#endif
    NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container it is not possible to terminate from outside a process
 * running with the 'stats-period' option. Set a flag to exit the
 * stats-period loop after SIGINT/SIGTERM has been received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
    TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
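
/*
 * Example (assuming the usual paired semantics): port 0 forwards to port 1
 * and port 1 back to port 0, port 2 pairs with port 3, and so on.
 */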
/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask Ethernet device events.
 * Defaults to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
                            (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
                            (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
                            (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
                            (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
                            (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
                            (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
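
/*
 * Example: an event of type "type" is printed by eth_event_callback() below
 * when (event_print_mask & (UINT32_C(1) << type)) is non-zero;
 * RTE_ETH_EVENT_VF_MBOX is the only event left out of the default mask.
 */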
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS
/*
 * Set when latency stats is enabled in the commandline.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;
#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
    .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
    .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
    .mode = RTE_FDIR_MODE_NONE,
    .pballoc = RTE_FDIR_PBALLOC_64K,
    .status = RTE_FDIR_REPORT_STATUS,
    .mask = {
        .vlan_tci_mask = 0xFFEF,
        .ipv4_mask = {
            .src_ip = 0xFFFFFFFF,
            .dst_ip = 0xFFFFFFFF,
        },
        .ipv6_mask = {
            .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
            .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
        },
        .src_port_mask = 0xFFFF,
        .dst_port_mask = 0xFFFF,
        .mac_addr_byte_mask = 0xFF,
        .tunnel_type_mask = 1,
        .tunnel_id_mask = 0xFFFFFFFF,
    },
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

struct vxlan_encap_conf vxlan_encap_conf = {
    .vni = "\x00\x00\x00",
    .udp_dst = RTE_BE16(4789), /* IANA-assigned VXLAN UDP port */
    .ipv4_src = IPv4(127, 0, 0, 1),
    .ipv4_dst = IPv4(255, 255, 255, 255),
    .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
        "\x00\x00\x00\x00\x00\x00\x00\x01",
    .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
        "\x00\x00\x00\x00\x00\x00\x11\x11",
    .eth_src = "\x00\x00\x00\x00\x00\x00",
    .eth_dst = "\xff\xff\xff\xff\xff\xff",
};

struct nvgre_encap_conf nvgre_encap_conf = {
    .tni = "\x00\x00\x00",
    .ipv4_src = IPv4(127, 0, 0, 1),
    .ipv4_dst = IPv4(255, 255, 255, 255),
    .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
        "\x00\x00\x00\x00\x00\x00\x00\x01",
    .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
        "\x00\x00\x00\x00\x00\x00\x11\x11",
    .eth_src = "\x00\x00\x00\x00\x00\x00",
    .eth_dst = "\xff\xff\xff\xff\xff\xff",
};
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
                                                   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
                              enum rte_eth_event_type type,
                              void *param, void *ret_param);
static void eth_dev_event_callback(const char *device_name,
                                   enum rte_dev_event_type type,
                                   void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
/*
 * Helper function to check whether a socket has not yet been discovered.
 * Returns a positive value if the socket id is new, zero otherwise.
 */
int
new_socket_id(unsigned int socket_id)
{
    unsigned int i;

    for (i = 0; i < num_sockets; i++) {
        if (socket_ids[i] == socket_id)
            return 0;
    }
    return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
    unsigned int i;
    unsigned int nb_lc;
    unsigned int sock_num;

    nb_lc = 0;
    for (i = 0; i < RTE_MAX_LCORE; i++) {
        if (!rte_lcore_is_enabled(i))
            continue;
        sock_num = rte_lcore_to_socket_id(i);
        if (new_socket_id(sock_num)) {
            if (num_sockets >= RTE_MAX_NUMA_NODES) {
                rte_exit(EXIT_FAILURE,
                         "Total sockets greater than %u\n",
                         RTE_MAX_NUMA_NODES);
            }
            socket_ids[num_sockets++] = sock_num;
        }
        if (i == rte_get_master_lcore())
            continue;
        fwd_lcores_cpuids[nb_lc++] = i;
    }
    nb_lcores = (lcoreid_t) nb_lc;
    nb_cfg_lcores = nb_lcores;
    nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
    portid_t i;

    for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
        peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
        peer_eth_addrs[i].addr_bytes[5] = i;
    }
}

static void
set_default_fwd_ports_config(void)
{
    portid_t pt_id;
    int i = 0;

    RTE_ETH_FOREACH_DEV(pt_id) {
        fwd_ports_ids[i++] = pt_id;

        /* Update sockets info according to the attached device */
        int socket_id = rte_eth_dev_socket_id(pt_id);
        if (socket_id >= 0 && new_socket_id(socket_id)) {
            if (num_sockets >= RTE_MAX_NUMA_NODES) {
                rte_exit(EXIT_FAILURE,
                         "Total sockets greater than %u\n",
                         RTE_MAX_NUMA_NODES);
            }
            socket_ids[num_sockets++] = socket_id;
        }
    }

    nb_cfg_ports = nb_ports;
    nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
    set_default_fwd_lcores_config();
    set_def_peer_eth_addrs();
    set_default_fwd_ports_config();
}
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
    unsigned int n_pages, mbuf_per_pg, leftover;
    uint64_t total_mem, mbuf_mem, obj_sz;

    /* there is no good way to predict how much space the mempool will
     * occupy because it will allocate chunks on the fly, and some of those
     * will come from default DPDK memory while some will come from our
     * external memory, so just assume 128MB will be enough for everyone.
     */
    uint64_t hdr_mem = 128 << 20;

    /* account for possible non-contiguousness */
    obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
    if (obj_sz > pgsz) {
        TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
        return -1;
    }

    mbuf_per_pg = pgsz / obj_sz;
    leftover = (nb_mbufs % mbuf_per_pg) > 0;
    n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

    mbuf_mem = n_pages * pgsz;

    total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

    if (total_mem > SIZE_MAX) {
        TESTPMD_LOG(ERR, "Memory size too big\n");
        return -1;
    }
    *out = (size_t)total_mem;

    return 0;
}
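
/*
 * Worked example with illustrative (not measured) numbers: pgsz = 2 MB and
 * obj_sz = 2560 B give 819 mbufs per page; 100000 mbufs then need 123 pages
 * (122 full pages plus one for the leftover), i.e. 246 MB of mbuf memory,
 * plus the assumed 128 MB header budget, aligned up to the page size.
 */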
static inline uint32_t
bsf64(uint64_t v)
{
    return (uint32_t)__builtin_ctzll(v);
}

static inline uint32_t
log2_u64(uint64_t v)
{
    if (v == 0)
        return 0;
    v = rte_align64pow2(v);
    return bsf64(v);
}

static int
pagesz_flags(uint64_t page_sz)
{
    /* as per mmap() manpage, all page sizes are log2 of page size
     * shifted by MAP_HUGE_SHIFT
     */
    int log2 = log2_u64(page_sz);

    return (log2 << HUGE_SHIFT);
}
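
/*
 * Example: for a 2 MB page, log2_u64(1ULL << 21) == 21, so pagesz_flags()
 * returns 21 << HUGE_SHIFT -- the value Linux spells MAP_HUGE_2MB.
 */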
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
    void *addr;
    int flags;

    /* allocate anonymous hugepages */
    flags = MAP_ANONYMOUS | MAP_PRIVATE;
    if (huge)
        flags |= HUGE_FLAG | pagesz_flags(pgsz);

    addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
    if (addr == MAP_FAILED)
        return NULL;

    return addr;
}

struct extmem_param {
    void *addr;
    size_t len;
    size_t pgsz;
    rte_iova_t *iova_table;
    unsigned int iova_table_len;
};
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
              bool huge)
{
    uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
            RTE_PGSIZE_16M, RTE_PGSIZE_16G};            /* POWER */
    unsigned int cur_page, n_pages, pgsz_idx;
    size_t mem_sz, cur_pgsz;
    rte_iova_t *iovas = NULL;
    void *addr;
    int ret;

    for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
        /* skip anything that is too big */
        if (pgsizes[pgsz_idx] > SIZE_MAX)
            continue;

        cur_pgsz = pgsizes[pgsz_idx];

        /* if we were told not to allocate hugepages, override */
        if (!huge)
            cur_pgsz = sysconf(_SC_PAGESIZE);

        ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
        if (ret < 0) {
            TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
            return -1;
        }

        /* allocate our memory */
        addr = alloc_mem(mem_sz, cur_pgsz, huge);

        /* if we couldn't allocate memory with a specified page size,
         * that doesn't mean we can't do it with other page sizes, so
         * try another one.
         */
        if (addr == NULL)
            continue;

        /* store IOVA addresses for every page in this memory area */
        n_pages = mem_sz / cur_pgsz;

        iovas = malloc(sizeof(*iovas) * n_pages);

        if (iovas == NULL) {
            TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
            goto fail;
        }
        /* lock memory if it's not huge pages */
        if (!huge)
            mlock(addr, mem_sz);

        /* populate IOVA addresses */
        for (cur_page = 0; cur_page < n_pages; cur_page++) {
            rte_iova_t iova;
            size_t offset;
            void *cur;

            offset = cur_pgsz * cur_page;
            cur = RTE_PTR_ADD(addr, offset);

            /* touch the page before getting its IOVA */
            *(volatile char *)cur = 0;

            iova = rte_mem_virt2iova(cur);

            iovas[cur_page] = iova;
        }

        break;
    }
    /* if we couldn't allocate anything */
    if (iovas == NULL)
        return -1;

    param->addr = addr;
    param->len = mem_sz;
    param->pgsz = cur_pgsz;
    param->iova_table = iovas;
    param->iova_table_len = n_pages;

    return 0;
fail:
    if (iovas)
        free(iovas);
    if (addr)
        munmap(addr, mem_sz);

    return -1;
}

static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
    struct extmem_param param;
    int socket_id, ret;

    memset(&param, 0, sizeof(param));

    /* check if our heap exists */
    socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
    if (socket_id < 0) {
        /* create our heap */
        ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
        if (ret < 0) {
            TESTPMD_LOG(ERR, "Cannot create heap\n");
            return -1;
        }
    }

    ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
    if (ret < 0) {
        TESTPMD_LOG(ERR, "Cannot create memory area\n");
        return -1;
    }

    /* we now have a valid memory area, so add it to heap */
    ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
            param.addr, param.len, param.iova_table,
            param.iova_table_len, param.pgsz);

    /* when using VFIO, memory is automatically mapped for DMA by EAL */

    /* not needed any more */
    free(param.iova_table);

    if (ret < 0) {
        TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
        munmap(param.addr, param.len);
        return -1;
    }

    /* success */
    TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
            param.len >> 20);

    return 0;
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
                 unsigned int socket_id)
{
    char pool_name[RTE_MEMPOOL_NAMESIZE];
    struct rte_mempool *rte_mp = NULL;
    uint32_t mb_size;

    mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
    mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

    TESTPMD_LOG(INFO,
        "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
        pool_name, nb_mbuf, mbuf_seg_size, socket_id);

    switch (mp_alloc_type) {
    case MP_ALLOC_NATIVE:
        {
            /* wrapper to rte_mempool_create() */
            TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
                    rte_mbuf_best_mempool_ops());
            rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
                mb_mempool_cache, 0, mbuf_seg_size, socket_id);
            break;
        }
    case MP_ALLOC_ANON:
        {
            rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
                mb_size, (unsigned int) mb_mempool_cache,
                sizeof(struct rte_pktmbuf_pool_private),
                socket_id, 0);
            if (rte_mp == NULL)
                goto err;

            if (rte_mempool_populate_anon(rte_mp) == 0) {
                rte_mempool_free(rte_mp);
                rte_mp = NULL;
                goto err;
            }
            rte_pktmbuf_pool_init(rte_mp, NULL);
            rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
            break;
        }
    case MP_ALLOC_XMEM:
    case MP_ALLOC_XMEM_HUGE:
        {
            int heap_socket;
            bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

            if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
                rte_exit(EXIT_FAILURE, "Could not create external memory\n");

            heap_socket =
                rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
            if (heap_socket < 0)
                rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

            TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
                    rte_mbuf_best_mempool_ops());
            rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
                    mb_mempool_cache, 0, mbuf_seg_size,
                    heap_socket);
            break;
        }
    default:
        {
            rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
        }
    }

err:
    if (rte_mp == NULL) {
        rte_exit(EXIT_FAILURE,
            "Creation of mbuf pool for socket %u failed: %s\n",
            socket_id, rte_strerror(rte_errno));
    } else if (verbose_level > 0) {
        rte_mempool_dump(stdout, rte_mp);
    }
}
/*
 * Check whether a given socket id is valid in NUMA mode.
 * If valid, return 0; else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
    static int warning_once = 0;

    if (new_socket_id(socket_id)) {
        if (!warning_once && numa_support)
            printf("Warning: NUMA should be configured manually by"
                   " using --port-numa-config and"
                   " --ring-numa-config parameters along with"
                   " --numa.\n");
        warning_once = 1;
        return -1;
    }
    return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues in all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
    queueid_t allowed_max_rxq = MAX_QUEUE_ID;
    portid_t pi;
    struct rte_eth_dev_info dev_info;

    RTE_ETH_FOREACH_DEV(pi) {
        rte_eth_dev_info_get(pi, &dev_info);
        if (dev_info.max_rx_queues < allowed_max_rxq) {
            allowed_max_rxq = dev_info.max_rx_queues;
            *pid = pi;
        }
    }
    return allowed_max_rxq;
}

/*
 * Check whether the input rxq is valid.
 * If the input rxq is not greater than any of the maximum number
 * of RX queues of all ports, it is valid.
 * If valid, return 0; else return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
    queueid_t allowed_max_rxq;
    portid_t pid = 0;

    allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
    if (rxq > allowed_max_rxq) {
        printf("Fail: input rxq (%u) can't be greater "
               "than max_rx_queues (%u) of port %u\n",
               rxq, allowed_max_rxq, pid);
        return -1;
    }
    return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues in all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
    queueid_t allowed_max_txq = MAX_QUEUE_ID;
    portid_t pi;
    struct rte_eth_dev_info dev_info;

    RTE_ETH_FOREACH_DEV(pi) {
        rte_eth_dev_info_get(pi, &dev_info);
        if (dev_info.max_tx_queues < allowed_max_txq) {
            allowed_max_txq = dev_info.max_tx_queues;
            *pid = pi;
        }
    }
    return allowed_max_txq;
}

/*
 * Check whether the input txq is valid.
 * If the input txq is not greater than any of the maximum number
 * of TX queues of all ports, it is valid.
 * If valid, return 0; else return -1.
 */
int
check_nb_txq(queueid_t txq)
{
    queueid_t allowed_max_txq;
    portid_t pid = 0;

    allowed_max_txq = get_allowed_max_nb_txq(&pid);
    if (txq > allowed_max_txq) {
        printf("Fail: input txq (%u) can't be greater "
               "than max_tx_queues (%u) of port %u\n",
               txq, allowed_max_txq, pid);
        return -1;
    }
    return 0;
}
static void
init_config(void)
{
    portid_t pid;
    struct rte_port *port;
    struct rte_mempool *mbp;
    unsigned int nb_mbuf_per_pool;
    lcoreid_t lc_id;
    uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
    struct rte_gro_param gro_param;
    uint32_t gso_types;
    int k;

    memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

    /* Configuration of logical cores. */
    fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
                 sizeof(struct fwd_lcore *) * nb_lcores,
                 RTE_CACHE_LINE_SIZE);
    if (fwd_lcores == NULL) {
        rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
                 "failed\n", nb_lcores);
    }
    for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
        fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
                        sizeof(struct fwd_lcore),
                        RTE_CACHE_LINE_SIZE);
        if (fwd_lcores[lc_id] == NULL) {
            rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
                     "failed\n");
        }
        fwd_lcores[lc_id]->cpuid_idx = lc_id;
    }

    RTE_ETH_FOREACH_DEV(pid) {
        port = &ports[pid];
        /* Apply default TxRx configuration for all ports */
        port->dev_conf.txmode = tx_mode;
        port->dev_conf.rxmode = rx_mode;
        rte_eth_dev_info_get(pid, &port->dev_info);

        if (!(port->dev_info.tx_offload_capa &
              DEV_TX_OFFLOAD_MBUF_FAST_FREE))
            port->dev_conf.txmode.offloads &=
                ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
        if (!(port->dev_info.tx_offload_capa &
              DEV_TX_OFFLOAD_MATCH_METADATA))
            port->dev_conf.txmode.offloads &=
                ~DEV_TX_OFFLOAD_MATCH_METADATA;
        if (numa_support) {
            if (port_numa[pid] != NUMA_NO_CONFIG)
                port_per_socket[port_numa[pid]]++;
            else {
                uint32_t socket_id = rte_eth_dev_socket_id(pid);

                /*
                 * if socket_id is invalid,
                 * set to the first available socket.
                 */
                if (check_socket_id(socket_id) < 0)
                    socket_id = socket_ids[0];
                port_per_socket[socket_id]++;
            }
        }

        /* Apply Rx offloads configuration */
        for (k = 0; k < port->dev_info.max_rx_queues; k++)
            port->rx_conf[k].offloads =
                port->dev_conf.rxmode.offloads;
        /* Apply Tx offloads configuration */
        for (k = 0; k < port->dev_info.max_tx_queues; k++)
            port->tx_conf[k].offloads =
                port->dev_conf.txmode.offloads;

        /* set flag to initialize port/queue */
        port->need_reconfig = 1;
        port->need_reconfig_queues = 1;
        port->tx_metadata = 0;
    }

    /*
     * Create pools of mbuf.
     * If NUMA support is disabled, create a single pool of mbuf in
     * socket 0 memory by default.
     * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
     *
     * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
     * nb_txd can be configured at run time.
     */
    if (param_total_num_mbufs)
        nb_mbuf_per_pool = param_total_num_mbufs;
    else {
        nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
            (nb_lcores * mb_mempool_cache) +
            RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
        nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
    }
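
    /*
     * Sizing example with assumed (not upstream-verified) values:
     * descriptor maxima of 2048 each, 8 lcores and a 250-mbuf cache give
     * 2048 + 8 * 250 + 2048 + MAX_PKT_BURST mbufs, multiplied by
     * RTE_MAX_ETHPORTS because this one pool may have to serve every port.
     */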
    if (numa_support) {
        uint8_t i;

        for (i = 0; i < num_sockets; i++)
            mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
                     socket_ids[i]);
    } else {
        if (socket_num == UMA_NO_CONFIG)
            mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
        else
            mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
                     socket_num);
    }

    init_port_config();

    gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
        DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
    /*
     * Records which Mbuf pool to use by each logical core, if needed.
     */
    for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
        mbp = mbuf_pool_find(
            rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

        if (mbp == NULL)
            mbp = mbuf_pool_find(0);
        fwd_lcores[lc_id]->mbp = mbp;
        /* initialize GSO context */
        fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
        fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
        fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
        fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
            ETHER_CRC_LEN;
        fwd_lcores[lc_id]->gso_ctx.flag = 0;
    }

    /* Configuration of packet forwarding streams. */
    if (init_fwd_streams() < 0)
        rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

    fwd_config_setup();

    /* create a gro context for each lcore */
    gro_param.gro_types = RTE_GRO_TCP_IPV4;
    gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
    gro_param.max_item_per_flow = MAX_PKT_BURST;
    for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
        gro_param.socket_id = rte_lcore_to_socket_id(
                fwd_lcores_cpuids[lc_id]);
        fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
        if (fwd_lcores[lc_id]->gro_ctx == NULL) {
            rte_exit(EXIT_FAILURE,
                    "rte_gro_ctx_create() failed\n");
        }
    }

#if defined RTE_LIBRTE_PMD_SOFTNIC
    if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
        RTE_ETH_FOREACH_DEV(pid) {
            port = &ports[pid];
            const char *driver = port->dev_info.driver_name;

            if (strcmp(driver, "net_softnic") == 0)
                port->softport.fwd_lcore_arg = fwd_lcores;
        }
    }
#endif
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
    struct rte_port *port;

    /* Reconfiguration of Ethernet ports. */
    port = &ports[new_port_id];
    rte_eth_dev_info_get(new_port_id, &port->dev_info);

    /* set flag to initialize port/queue */
    port->need_reconfig = 1;
    port->need_reconfig_queues = 1;
    port->socket_id = socket_id;

    init_port_config();
}

int
init_fwd_streams(void)
{
    portid_t pid;
    struct rte_port *port;
    streamid_t sm_id, nb_fwd_streams_new;
    queueid_t q;

    /* set socket id according to numa or not */
    RTE_ETH_FOREACH_DEV(pid) {
        port = &ports[pid];
        if (nb_rxq > port->dev_info.max_rx_queues) {
            printf("Fail: nb_rxq(%d) is greater than "
                   "max_rx_queues(%d)\n", nb_rxq,
                   port->dev_info.max_rx_queues);
            return -1;
        }
        if (nb_txq > port->dev_info.max_tx_queues) {
            printf("Fail: nb_txq(%d) is greater than "
                   "max_tx_queues(%d)\n", nb_txq,
                   port->dev_info.max_tx_queues);
            return -1;
        }
        if (numa_support) {
            if (port_numa[pid] != NUMA_NO_CONFIG)
                port->socket_id = port_numa[pid];
            else {
                port->socket_id = rte_eth_dev_socket_id(pid);

                /*
                 * if socket_id is invalid,
                 * set to the first available socket.
                 */
                if (check_socket_id(port->socket_id) < 0)
                    port->socket_id = socket_ids[0];
            }
        } else {
            if (socket_num == UMA_NO_CONFIG)
                port->socket_id = 0;
            else
                port->socket_id = socket_num;
        }
    }

    q = RTE_MAX(nb_rxq, nb_txq);
    if (q == 0) {
        printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
        return -1;
    }
    nb_fwd_streams_new = (streamid_t)(nb_ports * q);
    if (nb_fwd_streams_new == nb_fwd_streams)
        return 0;
    /* clear the old */
    if (fwd_streams != NULL) {
        for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
            if (fwd_streams[sm_id] == NULL)
                continue;
            rte_free(fwd_streams[sm_id]);
            fwd_streams[sm_id] = NULL;
        }
        rte_free(fwd_streams);
        fwd_streams = NULL;
    }

    /* init new */
    nb_fwd_streams = nb_fwd_streams_new;
    if (nb_fwd_streams) {
        fwd_streams = rte_zmalloc("testpmd: fwd_streams",
            sizeof(struct fwd_stream *) * nb_fwd_streams,
            RTE_CACHE_LINE_SIZE);
        if (fwd_streams == NULL)
            rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
                     " (struct fwd_stream *)) failed\n",
                     nb_fwd_streams);

        for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
            fwd_streams[sm_id] = rte_zmalloc("testpmd:"
                " struct fwd_stream", sizeof(struct fwd_stream),
                RTE_CACHE_LINE_SIZE);
            if (fwd_streams[sm_id] == NULL)
                rte_exit(EXIT_FAILURE, "rte_zmalloc"
                         "(struct fwd_stream) failed\n");
        }
    }

    return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
    unsigned int total_burst;
    unsigned int nb_burst;
    unsigned int burst_stats[3];
    uint16_t pktnb_stats[3];
    uint16_t nb_pkt;
    int burst_percent[3];

    /*
     * First compute the total number of packet bursts and the
     * two highest numbers of bursts of the same number of packets.
     */
    total_burst = 0;
    burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
    pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
    for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
        nb_burst = pbs->pkt_burst_spread[nb_pkt];
        if (nb_burst == 0)
            continue;
        total_burst += nb_burst;
        if (nb_burst > burst_stats[0]) {
            burst_stats[1] = burst_stats[0];
            pktnb_stats[1] = pktnb_stats[0];
            burst_stats[0] = nb_burst;
            pktnb_stats[0] = nb_pkt;
        } else if (nb_burst > burst_stats[1]) {
            burst_stats[1] = nb_burst;
            pktnb_stats[1] = nb_pkt;
        }
    }
    if (total_burst == 0)
        return;
    burst_percent[0] = (burst_stats[0] * 100) / total_burst;
    printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
           burst_percent[0], (int) pktnb_stats[0]);
    if (burst_stats[0] == total_burst) {
        printf("]\n");
        return;
    }
    if (burst_stats[0] + burst_stats[1] == total_burst) {
        printf(" + %d%% of %d pkts]\n",
               100 - burst_percent[0], pktnb_stats[1]);
        return;
    }
    burst_percent[1] = (burst_stats[1] * 100) / total_burst;
    burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
    if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
        printf(" + %d%% of others]\n", 100 - burst_percent[0]);
        return;
    }
    printf(" + %d%% of %d pkts + %d%% of others]\n",
           burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
    struct rte_port *port;
    uint8_t i;

    static const char *fwd_stats_border = "----------------------";

    port = &ports[port_id];
    printf("\n  %s Forward statistics for port %-2d %s\n",
           fwd_stats_border, port_id, fwd_stats_border);

    if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
        printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
               "%-"PRIu64"\n",
               stats->ipackets, stats->imissed,
               (uint64_t) (stats->ipackets + stats->imissed));

        if (cur_fwd_eng == &csum_fwd_engine)
            printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
                   port->rx_bad_ip_csum, port->rx_bad_l4_csum,
                   port->rx_bad_outer_l4_csum);
        if ((stats->ierrors + stats->rx_nombuf) > 0) {
            printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
            printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
        }

        printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
               "%-"PRIu64"\n",
               stats->opackets, port->tx_dropped,
               (uint64_t) (stats->opackets + port->tx_dropped));
    } else {
        printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
               "%14"PRIu64"\n",
               stats->ipackets, stats->imissed,
               (uint64_t) (stats->ipackets + stats->imissed));

        if (cur_fwd_eng == &csum_fwd_engine)
            printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64" Bad-outer-l4csum: %-14"PRIu64"\n",
                   port->rx_bad_ip_csum, port->rx_bad_l4_csum,
                   port->rx_bad_outer_l4_csum);
        if ((stats->ierrors + stats->rx_nombuf) > 0) {
            printf("  RX-error:%"PRIu64"\n", stats->ierrors);
            printf("  RX-nombufs: %14"PRIu64"\n",
                   stats->rx_nombuf);
        }

        printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
               "%14"PRIu64"\n",
               stats->opackets, port->tx_dropped,
               (uint64_t) (stats->opackets + port->tx_dropped));
    }

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
    if (port->rx_stream)
        pkt_burst_stats_display("RX",
            &port->rx_stream->rx_burst_stats);
    if (port->tx_stream)
        pkt_burst_stats_display("TX",
            &port->tx_stream->tx_burst_stats);
#endif

    if (port->rx_queue_stats_mapping_enabled) {
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
            printf("  Stats reg %2d RX-packets:%14"PRIu64
                   " RX-errors:%14"PRIu64
                   " RX-bytes:%14"PRIu64"\n",
                   i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
        }
    }
    if (port->tx_queue_stats_mapping_enabled) {
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
            printf("  Stats reg %2d TX-packets:%14"PRIu64
                   " TX-bytes:%14"PRIu64"\n",
                   i, stats->q_opackets[i], stats->q_obytes[i]);
        }
    }

    printf("  %s--------------------------------%s\n",
           fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
    struct fwd_stream *fs;
    static const char *fwd_top_stats_border = "-------";

    fs = fwd_streams[stream_id];
    if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
        (fs->fwd_dropped == 0))
        return;
    printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
           "TX Port=%2d/Queue=%2d %s\n",
           fwd_top_stats_border, fs->rx_port, fs->rx_queue,
           fs->tx_port, fs->tx_queue, fwd_top_stats_border);
    printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
           fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

    /* if checksum mode */
    if (cur_fwd_eng == &csum_fwd_engine) {
        printf("  RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
               "%-14u Rx- bad outer L4 checksum: %-14u\n",
               fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
               fs->rx_bad_outer_l4_csum);
    } else {
        printf("\n");
    }

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
    pkt_burst_stats_display("RX", &fs->rx_burst_stats);
    pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
static void
flush_fwd_rx_queues(void)
{
    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
    portid_t  rxp;
    portid_t  port_id;
    queueid_t rxq;
    uint16_t  nb_rx;
    uint16_t  i;
    uint8_t   j;
    uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
    uint64_t timer_period;

    /* convert to number of cycles */
    timer_period = rte_get_timer_hz(); /* 1 second timeout */

    for (j = 0; j < 2; j++) {
        for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
            for (rxq = 0; rxq < nb_rxq; rxq++) {
                port_id = fwd_ports_ids[rxp];
                /**
                 * testpmd can get stuck in the below do-while loop
                 * if rte_eth_rx_burst() always returns nonzero
                 * packets. So a timer is added to exit this loop
                 * after the 1-second timer expires.
                 */
                prev_tsc = rte_rdtsc();
                do {
                    nb_rx = rte_eth_rx_burst(port_id, rxq,
                        pkts_burst, MAX_PKT_BURST);
                    for (i = 0; i < nb_rx; i++)
                        rte_pktmbuf_free(pkts_burst[i]);

                    cur_tsc = rte_rdtsc();
                    diff_tsc = cur_tsc - prev_tsc;
                    timer_tsc += diff_tsc;
                } while ((nb_rx > 0) &&
                         (timer_tsc < timer_period));
                timer_tsc = 0;
            }
        }
        rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
    }
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
    struct fwd_stream **fsm;
    streamid_t nb_fs;
    streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
    uint64_t tics_per_1sec;
    uint64_t tics_datum;
    uint64_t tics_current;
    uint16_t i, cnt_ports;

    cnt_ports = nb_ports;
    tics_datum = rte_rdtsc();
    tics_per_1sec = rte_get_timer_hz();
#endif
    fsm = &fwd_streams[fc->stream_idx];
    nb_fs = fc->stream_nb;
    do {
        for (sm_id = 0; sm_id < nb_fs; sm_id++)
            (*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
        if (bitrate_enabled != 0 &&
                bitrate_lcore_id == rte_lcore_id()) {
            tics_current = rte_rdtsc();
            if (tics_current - tics_datum >= tics_per_1sec) {
                /* Periodic bitrate calculation */
                for (i = 0; i < cnt_ports; i++)
                    rte_stats_bitrate_calc(bitrate_data,
                        ports_ids[i]);
                tics_datum = tics_current;
            }
        }
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
        if (latencystats_enabled != 0 &&
                latencystats_lcore_id == rte_lcore_id())
            rte_latencystats_update();
#endif

    } while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
    run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
                         cur_fwd_config.fwd_eng->packet_fwd);
    return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
    struct fwd_lcore *fwd_lc;
    struct fwd_lcore tmp_lcore;

    fwd_lc = (struct fwd_lcore *) fwd_arg;
    tmp_lcore = *fwd_lc;
    tmp_lcore.stopped = 1;
    run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
    return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
    port_fwd_begin_t port_fwd_begin;
    unsigned int i;
    unsigned int lc_id;
    int diag;

    port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
    if (port_fwd_begin != NULL) {
        for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
            (*port_fwd_begin)(fwd_ports_ids[i]);
    }
    for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
        lc_id = fwd_lcores_cpuids[i];
        if ((interactive == 0) || (lc_id != rte_lcore_id())) {
            fwd_lcores[i]->stopped = 0;
            diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
                                         fwd_lcores[i], lc_id);
            if (diag != 0)
                printf("launch lcore %u failed - diag=%d\n",
                       lc_id, diag);
        }
    }
}

/*
 * Update the forward ports list.
 */
void
update_fwd_ports(portid_t new_pid)
{
    unsigned int i;
    unsigned int new_nb_fwd_ports = 0;
    int move = 0;

    for (i = 0; i < nb_fwd_ports; ++i) {
        if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
            move = 1;
        else if (move)
            fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
        else
            new_nb_fwd_ports++;
    }

    if (new_pid < RTE_MAX_ETHPORTS)
        fwd_ports_ids[new_nb_fwd_ports++] = new_pid;

    nb_fwd_ports = new_nb_fwd_ports;
    nb_cfg_ports = new_nb_fwd_ports;
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
    port_fwd_begin_t port_fwd_begin;
    port_fwd_end_t port_fwd_end;
    struct rte_port *port;
    unsigned int i;
    portid_t   pt_id;
    streamid_t sm_id;

    if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
        rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

    if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
        rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

    if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
        strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
        (!nb_rxq || !nb_txq))
        rte_exit(EXIT_FAILURE,
            "Either rxq or txq are 0, cannot use %s fwd mode\n",
            cur_fwd_eng->fwd_mode_name);

    if (all_ports_started() == 0) {
        printf("Not all ports were started\n");
        return;
    }
    if (test_done == 0) {
        printf("Packet forwarding already started\n");
        return;
    }

    if (dcb_test) {
        for (i = 0; i < nb_fwd_ports; i++) {
            pt_id = fwd_ports_ids[i];
            port = &ports[pt_id];
            if (!port->dcb_flag) {
                printf("In DCB mode, all forwarding ports must "
                       "be configured in this mode.\n");
                return;
            }
        }
        if (nb_fwd_lcores == 1) {
            printf("In DCB mode, the number of forwarding cores "
                   "should be larger than 1.\n");
            return;
        }
    }

    test_done = 0;

    fwd_config_setup();

    if (!no_flush_rx)
        flush_fwd_rx_queues();

    pkt_fwd_config_display(&cur_fwd_config);
    rxtx_config_display();

    for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
        pt_id = fwd_ports_ids[i];
        port = &ports[pt_id];
        rte_eth_stats_get(pt_id, &port->stats);
        port->tx_dropped = 0;

        map_port_queue_stats_mapping_registers(pt_id, port);
    }
    for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
        fwd_streams[sm_id]->rx_packets = 0;
        fwd_streams[sm_id]->tx_packets = 0;
        fwd_streams[sm_id]->fwd_dropped = 0;
        fwd_streams[sm_id]->rx_bad_ip_csum = 0;
        fwd_streams[sm_id]->rx_bad_l4_csum = 0;
        fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
        memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
               sizeof(fwd_streams[sm_id]->rx_burst_stats));
        memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
               sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
        fwd_streams[sm_id]->core_cycles = 0;
#endif
    }
    if (with_tx_first) {
        port_fwd_begin = tx_only_engine.port_fwd_begin;
        if (port_fwd_begin != NULL) {
            for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
                (*port_fwd_begin)(fwd_ports_ids[i]);
        }
        while (with_tx_first--) {
            launch_packet_forwarding(
                    run_one_txonly_burst_on_core);
            rte_eal_mp_wait_lcore();
        }
        port_fwd_end = tx_only_engine.port_fwd_end;
        if (port_fwd_end != NULL) {
            for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
                (*port_fwd_end)(fwd_ports_ids[i]);
        }
    }
    launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
    struct rte_eth_stats stats;
    struct rte_port *port;
    port_fwd_end_t port_fwd_end;
    int i;
    portid_t   pt_id;
    streamid_t sm_id;
    lcoreid_t  lc_id;
    uint64_t total_recv;
    uint64_t total_xmit;
    uint64_t total_rx_dropped;
    uint64_t total_tx_dropped;
    uint64_t total_rx_nombuf;
    uint64_t tx_dropped;
    uint64_t rx_bad_ip_csum;
    uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
    uint64_t fwd_cycles;
#endif

    static const char *acc_stats_border = "+++++++++++++++";

    if (test_done) {
        printf("Packet forwarding not started\n");
        return;
    }
    printf("Telling cores to stop...");
    for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
        fwd_lcores[lc_id]->stopped = 1;
    printf("\nWaiting for lcores to finish...\n");
    rte_eal_mp_wait_lcore();
    port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
    if (port_fwd_end != NULL) {
        for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
            pt_id = fwd_ports_ids[i];
            (*port_fwd_end)(pt_id);
        }
    }
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
    fwd_cycles = 0;
#endif
    for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
        if (cur_fwd_config.nb_fwd_streams >
            cur_fwd_config.nb_fwd_ports) {
            fwd_stream_stats_display(sm_id);
            ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
            ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
        } else {
            ports[fwd_streams[sm_id]->tx_port].tx_stream =
                fwd_streams[sm_id];
            ports[fwd_streams[sm_id]->rx_port].rx_stream =
                fwd_streams[sm_id];
        }
        tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
        tx_dropped = (uint64_t) (tx_dropped +
                                 fwd_streams[sm_id]->fwd_dropped);
        ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

        rx_bad_ip_csum =
            ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
        rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
                                     fwd_streams[sm_id]->rx_bad_ip_csum);
        ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
            rx_bad_ip_csum;

        rx_bad_l4_csum =
            ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
        rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
                                     fwd_streams[sm_id]->rx_bad_l4_csum);
        ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
            rx_bad_l4_csum;

        ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
            fwd_streams[sm_id]->rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
        fwd_cycles = (uint64_t) (fwd_cycles +
                                 fwd_streams[sm_id]->core_cycles);
#endif
    }
    total_recv = 0;
    total_xmit = 0;
    total_rx_dropped = 0;
    total_tx_dropped = 0;
    total_rx_nombuf  = 0;
    for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
        pt_id = fwd_ports_ids[i];

        port = &ports[pt_id];
        rte_eth_stats_get(pt_id, &stats);
        stats.ipackets -= port->stats.ipackets;
        port->stats.ipackets = 0;
        stats.opackets -= port->stats.opackets;
        port->stats.opackets = 0;
        stats.ibytes -= port->stats.ibytes;
        port->stats.ibytes = 0;
        stats.obytes -= port->stats.obytes;
        port->stats.obytes = 0;
        stats.imissed -= port->stats.imissed;
        port->stats.imissed = 0;
        stats.oerrors -= port->stats.oerrors;
        port->stats.oerrors = 0;
        stats.rx_nombuf -= port->stats.rx_nombuf;
        port->stats.rx_nombuf = 0;

        total_recv += stats.ipackets;
        total_xmit += stats.opackets;
        total_rx_dropped += stats.imissed;
        total_tx_dropped += port->tx_dropped;
        total_rx_nombuf  += stats.rx_nombuf;

        fwd_port_stats_display(pt_id, &stats);
    }

    printf("\n  %s Accumulated forward statistics for all ports"
           "%s\n",
           acc_stats_border, acc_stats_border);
    printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
           "%-"PRIu64"\n"
           "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
           "%-"PRIu64"\n",
           total_recv, total_rx_dropped, total_recv + total_rx_dropped,
           total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
    if (total_rx_nombuf > 0)
        printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
    printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
           "%s\n",
           acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
    if (total_recv > 0)
        printf("\n  CPU cycles/packet=%u (total cycles="
               "%"PRIu64" / total RX packets=%"PRIu64")\n",
               (unsigned int)(fwd_cycles / total_recv),
               fwd_cycles, total_recv);
#endif
    printf("\nDone.\n");
    test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
    if (rte_eth_dev_set_link_up(pid) < 0)
        printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
    if (rte_eth_dev_set_link_down(pid) < 0)
        printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
    portid_t pi;
    struct rte_port *port;

    RTE_ETH_FOREACH_DEV(pi) {
        port = &ports[pi];
        /* Check if there is a port which is not started */
        if ((port->port_status != RTE_PORT_STARTED) &&
            (port->slave_flag == 0))
            return 0;
    }

    /* No port is not started */
    return 1;
}

int
port_is_stopped(portid_t port_id)
{
    struct rte_port *port = &ports[port_id];

    if ((port->port_status != RTE_PORT_STOPPED) &&
        (port->slave_flag == 0))
        return 0;
    return 1;
}

int
all_ports_stopped(void)
{
    portid_t pi;

    RTE_ETH_FOREACH_DEV(pi) {
        if (!port_is_stopped(pi))
            return 0;
    }

    return 1;
}

int
port_is_started(portid_t port_id)
{
    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return 0;

    if (ports[port_id].port_status != RTE_PORT_STARTED)
        return 0;

    return 1;
}
int
start_port(portid_t pid)
{
    int diag, need_check_link_status = -1;
    portid_t pi;
    queueid_t qi;
    struct rte_port *port;
    struct ether_addr mac_addr;
    enum rte_eth_event_type event_type;

    if (port_id_is_invalid(pid, ENABLED_WARN))
        return 0;

    RTE_ETH_FOREACH_DEV(pi) {
        if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
            continue;

        need_check_link_status = 0;
        port = &ports[pi];
        if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
                                RTE_PORT_HANDLING) == 0) {
            printf("Port %d is now not stopped\n", pi);
            continue;
        }

        if (port->need_reconfig > 0) {
            port->need_reconfig = 0;

            if (flow_isolate_all) {
                int ret = port_flow_isolate(pi, 1);
                if (ret) {
                    printf("Failed to apply isolated"
                           " mode on port %d\n", pi);
                    return -1;
                }
            }
            configure_rxtx_dump_callbacks(0);
            printf("Configuring Port %d (socket %u)\n", pi,
                   port->socket_id);
            /* configure port */
            diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
                                         &(port->dev_conf));
            if (diag != 0) {
                if (rte_atomic16_cmpset(&(port->port_status),
                    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
                    printf("Port %d can not be set back "
                           "to stopped\n", pi);
                printf("Fail to configure port %d\n", pi);
                /* try to reconfigure port next time */
                port->need_reconfig = 1;
                return -1;
            }
        }
        if (port->need_reconfig_queues > 0) {
            port->need_reconfig_queues = 0;
            /* setup tx queues */
            for (qi = 0; qi < nb_txq; qi++) {
                if ((numa_support) &&
                    (txring_numa[pi] != NUMA_NO_CONFIG))
                    diag = rte_eth_tx_queue_setup(pi, qi,
                        port->nb_tx_desc[qi],
                        txring_numa[pi],
                        &(port->tx_conf[qi]));
                else
                    diag = rte_eth_tx_queue_setup(pi, qi,
                        port->nb_tx_desc[qi],
                        port->socket_id,
                        &(port->tx_conf[qi]));

                if (diag == 0)
                    continue;

                /* Fail to setup tx queue, return */
                if (rte_atomic16_cmpset(&(port->port_status),
                                        RTE_PORT_HANDLING,
                                        RTE_PORT_STOPPED) == 0)
                    printf("Port %d can not be set back "
                           "to stopped\n", pi);
                printf("Fail to configure port %d tx queues\n",
                       pi);
                /* try to reconfigure queues next time */
                port->need_reconfig_queues = 1;
                return -1;
            }
            for (qi = 0; qi < nb_rxq; qi++) {
                /* setup rx queues */
                if ((numa_support) &&
                    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
                    struct rte_mempool *mp =
                        mbuf_pool_find(rxring_numa[pi]);
                    if (mp == NULL) {
                        printf("Failed to setup RX queue:"
                               "No mempool allocation"
                               " on the socket %d\n",
                               rxring_numa[pi]);
                        return -1;
                    }

                    diag = rte_eth_rx_queue_setup(pi, qi,
                         port->nb_rx_desc[qi],
                         rxring_numa[pi],
                         &(port->rx_conf[qi]),
                         mp);
                } else {
                    struct rte_mempool *mp =
                        mbuf_pool_find(port->socket_id);
                    if (mp == NULL) {
                        printf("Failed to setup RX queue:"
                               "No mempool allocation"
                               " on the socket %d\n",
                               port->socket_id);
                        return -1;
                    }
                    diag = rte_eth_rx_queue_setup(pi, qi,
                         port->nb_rx_desc[qi],
                         port->socket_id,
                         &(port->rx_conf[qi]),
                         mp);
                }
                if (diag == 0)
                    continue;

                /* Fail to setup rx queue, return */
                if (rte_atomic16_cmpset(&(port->port_status),
                                        RTE_PORT_HANDLING,
                                        RTE_PORT_STOPPED) == 0)
                    printf("Port %d can not be set back "
                           "to stopped\n", pi);
                printf("Fail to configure port %d rx queues\n",
                       pi);
                /* try to reconfigure queues next time */
                port->need_reconfig_queues = 1;
                return -1;
            }
        }

        configure_rxtx_dump_callbacks(verbose_level);
        /* start port */
        if (rte_eth_dev_start(pi) < 0) {
            printf("Fail to start port %d\n", pi);

            /* Fail to setup rx queue, return */
            if (rte_atomic16_cmpset(&(port->port_status),
                RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
                printf("Port %d can not be set back to "
                       "stopped\n", pi);
            continue;
        }

        if (rte_atomic16_cmpset(&(port->port_status),
            RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
            printf("Port %d can not be set into started\n", pi);

        rte_eth_macaddr_get(pi, &mac_addr);
        printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
               mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
               mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
               mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

        /* at least one port started, need checking link status */
        need_check_link_status = 1;
    }

    for (event_type = RTE_ETH_EVENT_UNKNOWN;
         event_type < RTE_ETH_EVENT_MAX;
         event_type++) {
        diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
                                             event_type,
                                             eth_event_callback,
                                             NULL);
        if (diag) {
            printf("Failed to setup event callback for event %d\n",
                   event_type);
            return -1;
        }
    }

    if (need_check_link_status == 1 && !no_link_check)
        check_all_ports_link_status(RTE_PORT_ALL);
    else if (need_check_link_status == 0)
        printf("Please stop the ports first\n");

    printf("Done\n");
    return 0;
}
void
stop_port(portid_t pid)
{
    portid_t pi;
    struct rte_port *port;
    int need_check_link_status = 0;

    if (port_id_is_invalid(pid, ENABLED_WARN))
        return;

    printf("Stopping ports...\n");

    RTE_ETH_FOREACH_DEV(pi) {
        if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
            continue;

        if (port_is_forwarding(pi) != 0 && test_done == 0) {
            printf("Please remove port %d from forwarding configuration.\n", pi);
            continue;
        }

        if (port_is_bonding_slave(pi)) {
            printf("Please remove port %d from bonded device.\n", pi);
            continue;
        }

        port = &ports[pi];
        if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
                                RTE_PORT_HANDLING) == 0)
            continue;

        rte_eth_dev_stop(pi);

        if (rte_atomic16_cmpset(&(port->port_status),
            RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
            printf("Port %d can not be set into stopped\n", pi);
        need_check_link_status = 1;
    }
    if (need_check_link_status && !no_link_check)
        check_all_ports_link_status(RTE_PORT_ALL);

    printf("Done\n");
}

static void
remove_unused_fwd_ports(void)
{
    int i;
    int last_port_idx = nb_ports - 1;

    for (i = 0; i <= last_port_idx; i++) { /* iterate in ports_ids */
        if (rte_eth_devices[ports_ids[i]].state != RTE_ETH_DEV_UNUSED)
            continue;
        /* skip unused ports at the end */
        while (i <= last_port_idx &&
               rte_eth_devices[ports_ids[last_port_idx]].state
               == RTE_ETH_DEV_UNUSED)
            last_port_idx--;
        if (last_port_idx < i)
            break;
        /* overwrite unused port with last valid port */
        ports_ids[i] = ports_ids[last_port_idx];
        /* decrease ports count */
        last_port_idx--;
    }
    nb_ports = rte_eth_dev_count_avail();
    update_fwd_ports(RTE_MAX_ETHPORTS);
}
void
close_port(portid_t pid)
{
    portid_t pi;
    struct rte_port *port;

    if (port_id_is_invalid(pid, ENABLED_WARN))
        return;

    printf("Closing ports...\n");

    RTE_ETH_FOREACH_DEV(pi) {
        if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
            continue;

        if (port_is_forwarding(pi) != 0 && test_done == 0) {
            printf("Please remove port %d from forwarding configuration.\n", pi);
            continue;
        }

        if (port_is_bonding_slave(pi)) {
            printf("Please remove port %d from bonded device.\n", pi);
            continue;
        }

        port = &ports[pi];
        if (rte_atomic16_cmpset(&(port->port_status),
            RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
            printf("Port %d is already closed\n", pi);
            continue;
        }

        if (rte_atomic16_cmpset(&(port->port_status),
            RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
            printf("Port %d is now not stopped\n", pi);
            continue;
        }

        if (port->flow_list)
            port_flow_flush(pi);
        rte_eth_dev_close(pi);

        remove_unused_fwd_ports();

        if (rte_atomic16_cmpset(&(port->port_status),
            RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
            printf("Port %d cannot be set to closed\n", pi);
    }

    printf("Done\n");
}

void
reset_port(portid_t pid)
{
    int diag;
    portid_t pi;
    struct rte_port *port;

    if (port_id_is_invalid(pid, ENABLED_WARN))
        return;

    printf("Resetting ports...\n");

    RTE_ETH_FOREACH_DEV(pi) {
        if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
            continue;

        if (port_is_forwarding(pi) != 0 && test_done == 0) {
            printf("Please remove port %d from forwarding "
                   "configuration.\n", pi);
            continue;
        }

        if (port_is_bonding_slave(pi)) {
            printf("Please remove port %d from bonded device.\n",
                   pi);
            continue;
        }

        diag = rte_eth_dev_reset(pi);
        if (diag == 0) {
            port = &ports[pi];
            port->need_reconfig = 1;
            port->need_reconfig_queues = 1;
        } else {
            printf("Failed to reset port %d. diag=%d\n", pi, diag);
        }
    }

    printf("Done\n");
}

void
attach_port(char *identifier)
{
    portid_t pi = 0;
    unsigned int socket_id;

    printf("Attaching a new port...\n");

    if (identifier == NULL) {
        printf("Invalid parameters are specified\n");
        return;
    }

    if (rte_eth_dev_attach(identifier, &pi))
        return;

    socket_id = (unsigned)rte_eth_dev_socket_id(pi);
    /* if socket_id is invalid, set to the first available socket. */
    if (check_socket_id(socket_id) < 0)
        socket_id = socket_ids[0];
    reconfig(pi, socket_id);
    rte_eth_promiscuous_enable(pi);

    ports_ids[nb_ports] = pi;
    nb_ports = rte_eth_dev_count_avail();

    ports[pi].port_status = RTE_PORT_STOPPED;

    update_fwd_ports(pi);

    printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
    printf("Done\n");
}

void
detach_port(portid_t port_id)
{
    char name[RTE_ETH_NAME_MAX_LEN];

    printf("Detaching a port...\n");

    if (ports[port_id].port_status != RTE_PORT_CLOSED) {
        if (ports[port_id].port_status != RTE_PORT_STOPPED) {
            printf("Port not stopped\n");
            return;
        }
        printf("Port was not closed\n");
        if (ports[port_id].flow_list)
            port_flow_flush(port_id);
    }

    if (rte_eth_dev_detach(port_id, name)) {
        TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
        return;
    }

    remove_unused_fwd_ports();

    printf("Port %u is detached. Now total ports is %d\n",
           port_id, nb_ports);
    printf("Done\n");
}
void
pmd_test_exit(void)
{
    struct rte_device *device;
    portid_t pt_id;
    int ret;

    if (test_done == 0)
        stop_packet_forwarding();

    if (ports != NULL) {
        no_link_check = 1;
        RTE_ETH_FOREACH_DEV(pt_id) {
            printf("\nShutting down port %d...\n", pt_id);
            fflush(stdout);
            stop_port(pt_id);
            close_port(pt_id);

            /*
             * This is a workaround to fix a virtio-user issue that
             * requires calling a clean-up routine to remove existing
             * virtio-user files.
             * This workaround is valid only for testpmd; a fix
             * valid for all applications is needed.
             * TODO: Implement proper resource cleanup
             */
            device = rte_eth_devices[pt_id].device;
            if (device && !strcmp(device->driver->name, "net_virtio_user"))
                detach_port(pt_id);
        }
    }

    if (hot_plug) {
        ret = rte_dev_event_monitor_stop();
        if (ret) {
            RTE_LOG(ERR, EAL,
                "fail to stop device event monitor.");
            return;
        }

        ret = rte_dev_event_callback_unregister(NULL,
            eth_dev_event_callback, NULL);
        if (ret < 0) {
            RTE_LOG(ERR, EAL,
                "fail to unregister device event callback.\n");
            return;
        }

        ret = rte_dev_hotplug_handle_disable();
        if (ret) {
            RTE_LOG(ERR, EAL,
                "fail to disable hotplug handling.\n");
            return;
        }
    }

    printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
    const char *cmd_name;
    cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2443 /* Check the link status of all ports in up to 9s, and print them finally */
2445 check_all_ports_link_status(uint32_t port_mask)
2447 #define CHECK_INTERVAL 100 /* 100ms */
2448 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2450 uint8_t count, all_ports_up, print_flag = 0;
2451 struct rte_eth_link link;
2453 printf("Checking link statuses...\n");
2455 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2457 RTE_ETH_FOREACH_DEV(portid) {
2458 if ((port_mask & (1 << portid)) == 0)
2460 memset(&link, 0, sizeof(link));
2461 rte_eth_link_get_nowait(portid, &link);
2462 /* print link status if flag set */
2463 if (print_flag == 1) {
2464 if (link.link_status)
2466 "Port%d Link Up. speed %u Mbps- %s\n",
2467 portid, link.link_speed,
2468 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2469 ("full-duplex") : ("half-duplex\n"));
2471 printf("Port %d Link Down\n", portid);
2474 /* clear all_ports_up flag if any link down */
2475 if (link.link_status == ETH_LINK_DOWN) {
2480 /* after finally printing all link statuses, get out */
2481 if (print_flag == 1)
2484 if (all_ports_up == 0) {
2486 rte_delay_ms(CHECK_INTERVAL);
2489 /* set the print_flag if all ports up or timeout */
2490 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
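/*
 * Hedged sketch, not part of the original code: building the port_mask
 * argument for check_all_ports_link_status() from all valid ports. The
 * helper name is hypothetical; note the mask, like the caller, only
 * covers port ids 0..31.
 */
#ifdef TESTPMD_LINK_MASK_EXAMPLE
static uint32_t
all_ports_link_mask(void)
{
	uint32_t mask = 0;
	portid_t pid;

	RTE_ETH_FOREACH_DEV(pid)
		mask |= UINT32_C(1) << pid; /* one bit per port id */
	return mask;
}
#endif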
2500 rmv_event_callback(void *arg)
2502 int need_to_start = 0;
2503 int org_no_link_check = no_link_check;
2504 portid_t port_id = (intptr_t)arg;
2506 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2508 if (!test_done && port_is_forwarding(port_id)) {
2510 stop_packet_forwarding();
2514 no_link_check = org_no_link_check;
2515 close_port(port_id);
2516 detach_port(port_id);
2518 start_packet_forwarding(0);
2521 /* This function is used by the interrupt thread */
2523 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2526 static const char * const event_desc[] = {
2527 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2528 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2529 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2530 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2531 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2532 [RTE_ETH_EVENT_IPSEC] = "IPsec",
2533 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2534 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2535 [RTE_ETH_EVENT_NEW] = "device probed",
2536 [RTE_ETH_EVENT_DESTROY] = "device released",
2537 [RTE_ETH_EVENT_MAX] = NULL,
2540 RTE_SET_USED(param);
2541 RTE_SET_USED(ret_param);
2543 if (type >= RTE_ETH_EVENT_MAX) {
2544 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2545 port_id, __func__, type);
2547 } else if (event_print_mask & (UINT32_C(1) << type)) {
2548 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2553 if (port_id_is_invalid(port_id, DISABLED_WARN))
2557 case RTE_ETH_EVENT_INTR_RMV:
2558 if (rte_eal_alarm_set(100000,
2559 rmv_event_callback, (void *)(intptr_t)port_id))
2560 fprintf(stderr, "Could not set up deferred device removal\n");
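/*
 * Hedged sketch, not part of the original code: the deferral pattern the
 * RMV case above relies on. Tearing a port down from inside the
 * interrupt thread is unsafe, so the work is postponed with
 * rte_eal_alarm_set(); the names and the 100 ms delay are illustrative.
 */
#ifdef TESTPMD_DEFER_EXAMPLE
static void
deferred_remove(void *arg)
{
	portid_t port_id = (intptr_t)arg;

	close_port(port_id); /* now safe: runs outside the interrupt thread */
}

static void
schedule_remove(portid_t port_id)
{
	if (rte_eal_alarm_set(100000, /* microseconds */
			deferred_remove, (void *)(intptr_t)port_id))
		fprintf(stderr, "Could not schedule deferred removal\n");
}
#endif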
2568 /* This function is used by the interrupt thread */
2570 eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2571 __rte_unused void *arg)
2576 if (type >= RTE_DEV_EVENT_MAX) {
2577 fprintf(stderr, "%s called upon invalid event %d\n",
2583 case RTE_DEV_EVENT_REMOVE:
2584 RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2586 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2588 RTE_LOG(ERR, EAL, "cannot get port for device %s!\n",
2592 rmv_event_callback((void *)(intptr_t)port_id);
2594 case RTE_DEV_EVENT_ADD:
2595 RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2597 /* TODO: After the kernel driver binding finishes,
2598 * begin to attach the port.
2607 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2611 uint8_t mapping_found = 0;
2613 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2614 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2615 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2616 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2617 tx_queue_stats_mappings[i].queue_id,
2618 tx_queue_stats_mappings[i].stats_counter_id);
2625 port->tx_queue_stats_mapping_enabled = 1;
2630 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2634 uint8_t mapping_found = 0;
2636 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2637 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2638 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2639 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2640 rx_queue_stats_mappings[i].queue_id,
2641 rx_queue_stats_mappings[i].stats_counter_id);
2648 port->rx_queue_stats_mapping_enabled = 1;
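/*
 * Hedged sketch, not part of the original code: the shape of one mapping
 * entry consumed by the loops above, assuming the queue_stats_mappings
 * structure from testpmd.h. Values are illustrative: queue 3 of port 0
 * accumulates into hardware stats counter 5.
 */
#ifdef TESTPMD_STATS_MAP_EXAMPLE
static const struct queue_stats_mappings example_stats_map = {
	.port_id = 0,
	.queue_id = 3,
	.stats_counter_id = 5,
};
#endif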
2653 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2657 diag = set_tx_queue_stats_mapping_registers(pi, port);
2659 if (diag == -ENOTSUP) {
2660 port->tx_queue_stats_mapping_enabled = 0;
2661 printf("TX queue stats mapping not supported port id=%d\n", pi);
2664 rte_exit(EXIT_FAILURE,
2665 "set_tx_queue_stats_mapping_registers "
2666 "failed for port id=%d diag=%d\n",
2670 diag = set_rx_queue_stats_mapping_registers(pi, port);
2672 if (diag == -ENOTSUP) {
2673 port->rx_queue_stats_mapping_enabled = 0;
2674 printf("RX queue stats mapping not supported port id=%d\n", pi);
2677 rte_exit(EXIT_FAILURE,
2678 "set_rx_queue_stats_mapping_registers "
2679 "failed for port id=%d diag=%d\n",
2685 rxtx_port_config(struct rte_port *port)
2689 for (qid = 0; qid < nb_rxq; qid++) {
2690 port->rx_conf[qid] = port->dev_info.default_rxconf;
2692 /* Check if any Rx parameters have been passed */
2693 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2694 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2696 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2697 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2699 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2700 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2702 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2703 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2705 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2706 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2708 port->nb_rx_desc[qid] = nb_rxd;
2711 for (qid = 0; qid < nb_txq; qid++) {
2712 port->tx_conf[qid] = port->dev_info.default_txconf;
2714 /* Check if any Tx parameters have been passed */
2715 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2716 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2718 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2719 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2721 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2722 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2724 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2725 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2727 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2728 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2730 port->nb_tx_desc[qid] = nb_txd;
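/*
 * Hedged sketch, not part of the original code: the RTE_PMD_PARAM_UNSET
 * sentinel pattern used above. The PMD default taken from dev_info is
 * kept unless the user supplied a value on the command line; the helper
 * and parameter names are hypothetical.
 */
#ifdef TESTPMD_THRESH_EXAMPLE
static void
apply_pthresh_override(struct rte_eth_rxconf *rxc, int16_t user_pthresh)
{
	if (user_pthresh != RTE_PMD_PARAM_UNSET)
		rxc->rx_thresh.pthresh = (uint8_t)user_pthresh;
	/* otherwise the value taken from default_rxconf stays in place */
}
#endif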
2735 init_port_config(void)
2738 struct rte_port *port;
2740 RTE_ETH_FOREACH_DEV(pid) {
2742 port->dev_conf.fdir_conf = fdir_conf;
2743 rte_eth_dev_info_get(pid, &port->dev_info);
2745 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2746 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2747 rss_hf & port->dev_info.flow_type_rss_offloads;
2749 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2750 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2753 if (port->dcb_flag == 0) {
2754 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2755 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2757 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2760 rxtx_port_config(port);
2762 rte_eth_macaddr_get(pid, &port->eth_addr);
2764 map_port_queue_stats_mapping_registers(pid, port);
2765 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2766 rte_pmd_ixgbe_bypass_init(pid);
2769 if (lsc_interrupt &&
2770 (rte_eth_devices[pid].data->dev_flags &
2771 RTE_ETH_DEV_INTR_LSC))
2772 port->dev_conf.intr_conf.lsc = 1;
2773 if (rmv_interrupt &&
2774 (rte_eth_devices[pid].data->dev_flags &
2775 RTE_ETH_DEV_INTR_RMV))
2776 port->dev_conf.intr_conf.rmv = 1;
2780 void set_port_slave_flag(portid_t slave_pid)
2782 struct rte_port *port;
2784 port = &ports[slave_pid];
2785 port->slave_flag = 1;
2788 void clear_port_slave_flag(portid_t slave_pid)
2790 struct rte_port *port;
2792 port = &ports[slave_pid];
2793 port->slave_flag = 0;
2796 uint8_t port_is_bonding_slave(portid_t slave_pid)
2798 struct rte_port *port;
2800 port = &ports[slave_pid];
2801 if ((rte_eth_devices[slave_pid].data->dev_flags &
2802 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2807 const uint16_t vlan_tags[] = {
2808 0, 1, 2, 3, 4, 5, 6, 7,
2809 8, 9, 10, 11, 12, 13, 14, 15,
2810 16, 17, 18, 19, 20, 21, 22, 23,
2811 24, 25, 26, 27, 28, 29, 30, 31
2815 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2816 enum dcb_mode_enable dcb_mode,
2817 enum rte_eth_nb_tcs num_tcs,
2822 struct rte_eth_rss_conf rss_conf;
2825 * Builds up the correct configuration for DCB+VT based on the vlan_tags
2826 * array given above, and the number of traffic classes available for use.
2828 if (dcb_mode == DCB_VT_ENABLED) {
2829 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2830 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2831 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2832 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2834 /* VMDQ+DCB RX and TX configurations */
2835 vmdq_rx_conf->enable_default_pool = 0;
2836 vmdq_rx_conf->default_pool = 0;
2837 vmdq_rx_conf->nb_queue_pools =
2838 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2839 vmdq_tx_conf->nb_queue_pools =
2840 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2842 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2843 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2844 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2845 vmdq_rx_conf->pool_map[i].pools =
2846 1 << (i % vmdq_rx_conf->nb_queue_pools);
2848 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2849 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2850 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2853 /* set DCB mode of RX and TX of multiple queues */
2854 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2855 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2857 struct rte_eth_dcb_rx_conf *rx_conf =
2858 &eth_conf->rx_adv_conf.dcb_rx_conf;
2859 struct rte_eth_dcb_tx_conf *tx_conf =
2860 &eth_conf->tx_adv_conf.dcb_tx_conf;
2862 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2866 rx_conf->nb_tcs = num_tcs;
2867 tx_conf->nb_tcs = num_tcs;
2869 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2870 rx_conf->dcb_tc[i] = i % num_tcs;
2871 tx_conf->dcb_tc[i] = i % num_tcs;
2874 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2875 eth_conf->rx_adv_conf.rss_conf = rss_conf;
2876 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2880 eth_conf->dcb_capability_en =
2881 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2883 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
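/*
 * Hedged worked example, not part of the original code: with ETH_4_TCS
 * the VMDQ+DCB branch above selects 32 pools and 32 pool maps, so
 * vlan_tags[i] lands in pool bit (i % 32), i.e. VLAN 0 -> 1 << 0,
 * VLAN 1 -> 1 << 1, ..., VLAN 31 -> 1 << 31, while each of the 8 user
 * priorities maps to traffic class (i % 4).
 */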
2889 init_port_dcb_config(portid_t pid,
2890 enum dcb_mode_enable dcb_mode,
2891 enum rte_eth_nb_tcs num_tcs,
2894 struct rte_eth_conf port_conf;
2895 struct rte_port *rte_port;
2899 rte_port = &ports[pid];
2901 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2902 /* Enter DCB configuration status */
2905 port_conf.rxmode = rte_port->dev_conf.rxmode;
2906 port_conf.txmode = rte_port->dev_conf.txmode;
2908 /* Set configuration of DCB in VT mode and DCB in non-VT mode */
2909 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2912 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2914 /* Re-configure the device. */
2915 rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
2917 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2919 /* If dev_info.vmdq_pool_base is greater than 0,
2920 * the queue ids of the VMDq pools start after the PF queues.
2922 if (dcb_mode == DCB_VT_ENABLED &&
2923 rte_port->dev_info.vmdq_pool_base > 0) {
2924 printf("VMDQ_DCB multi-queue mode is nonsensical"
2925 " for port %d.", pid);
2929 /* Assume all ports in testpmd have the same DCB capability
2930 * and the same number of rxq and txq in DCB mode
2932 if (dcb_mode == DCB_VT_ENABLED) {
2933 if (rte_port->dev_info.max_vfs > 0) {
2934 nb_rxq = rte_port->dev_info.nb_rx_queues;
2935 nb_txq = rte_port->dev_info.nb_tx_queues;
2937 nb_rxq = rte_port->dev_info.max_rx_queues;
2938 nb_txq = rte_port->dev_info.max_tx_queues;
2941 /* If VT is disabled, use all PF queues */
2942 if (rte_port->dev_info.vmdq_pool_base == 0) {
2943 nb_rxq = rte_port->dev_info.max_rx_queues;
2944 nb_txq = rte_port->dev_info.max_tx_queues;
2946 nb_rxq = (queueid_t)num_tcs;
2947 nb_txq = (queueid_t)num_tcs;
2951 rx_free_thresh = 64;
2953 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2955 rxtx_port_config(rte_port);
2957 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2958 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2959 rx_vft_set(pid, vlan_tags[i], 1);
2961 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2962 map_port_queue_stats_mapping_registers(pid, rte_port);
2964 rte_port->dcb_flag = 1;
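/*
 * Hedged usage sketch, not part of the original code: enabling plain DCB
 * (no VT, no PFC) with four traffic classes on one port via
 * init_port_dcb_config() above; the values are illustrative only.
 */
#ifdef TESTPMD_DCB_EXAMPLE
static void
dcb_example(portid_t pid)
{
	if (init_port_dcb_config(pid, DCB_ENABLED, ETH_4_TCS, 0) != 0)
		printf("DCB configuration failed for port %d\n", pid);
}
#endif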
2972 /* Configuration of Ethernet ports. */
2973 ports = rte_zmalloc("testpmd: ports",
2974 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2975 RTE_CACHE_LINE_SIZE);
2976 if (ports == NULL) {
2977 rte_exit(EXIT_FAILURE,
2978 "rte_zmalloc(%d struct rte_port) failed\n",
2982 /* Initialize ports NUMA structures */
2983 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2984 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2985 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2999 const char clr[] = { 27, '[', '2', 'J', '\0' };
3000 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3002 /* Clear screen and move to top left */
3003 printf("%s%s", clr, top_left);
3005 printf("\nPort statistics ====================================");
3006 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3007 nic_stats_display(fwd_ports_ids[i]);
3011 signal_handler(int signum)
3013 if (signum == SIGINT || signum == SIGTERM) {
3014 printf("\nSignal %d received, preparing to exit...\n",
3016 #ifdef RTE_LIBRTE_PDUMP
3017 /* uninitialize packet capture framework */
3020 #ifdef RTE_LIBRTE_LATENCY_STATS
3021 rte_latencystats_uninit();
3024 /* Set flag to indicate forced termination. */
3026 /* Exit with the expected status: re-raise the signal with the default handler. */
3027 signal(signum, SIG_DFL);
3028 kill(getpid(), signum);
3033 main(int argc, char** argv)
3040 signal(SIGINT, signal_handler);
3041 signal(SIGTERM, signal_handler);
3043 diag = rte_eal_init(argc, argv);
3045 rte_panic("Cannot init EAL\n");
3047 testpmd_logtype = rte_log_register("testpmd");
3048 if (testpmd_logtype < 0)
3049 rte_panic("Cannot register log type");
3050 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3052 #ifdef RTE_LIBRTE_PDUMP
3053 /* initialize packet capture framework */
3054 rte_pdump_init(NULL);
3058 RTE_ETH_FOREACH_DEV(port_id) {
3059 ports_ids[count] = port_id;
3062 nb_ports = (portid_t) count;
3064 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3066 /* allocate port structures, and init them */
3069 set_def_fwd_config();
3071 rte_panic("Empty set of forwarding logical cores - check the "
3072 "core mask supplied in the command parameters\n");
3074 /* Bitrate/latency stats disabled by default */
3075 #ifdef RTE_LIBRTE_BITRATE
3076 bitrate_enabled = 0;
3078 #ifdef RTE_LIBRTE_LATENCY_STATS
3079 latencystats_enabled = 0;
3082 /* on FreeBSD, mlockall() is disabled by default */
3083 #ifdef RTE_EXEC_ENV_BSDAPP
3092 launch_args_parse(argc, argv);
3094 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3095 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3099 if (tx_first && interactive)
3100 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3101 "interactive mode.\n");
3103 if (tx_first && lsc_interrupt) {
3104 printf("Warning: lsc_interrupt needs to be off when "
3105 " using tx_first. Disabling.\n");
3109 if (!nb_rxq && !nb_txq)
3110 printf("Warning: Either rx or tx queues should be non-zero\n");
3112 if (nb_rxq > 1 && nb_rxq > nb_txq)
3113 printf("Warning: nb_rxq=%d enables RSS configuration, "
3114 "but nb_txq=%d will prevent to fully test it.\n",
3120 ret = rte_dev_hotplug_handle_enable();
3123 "fail to enable hotplug handling.");
3127 ret = rte_dev_event_monitor_start();
3130 "fail to start device event monitoring.");
3134 ret = rte_dev_event_callback_register(NULL,
3135 eth_dev_event_callback, NULL);
3138 "fail to register device event callback\n");
3143 if (start_port(RTE_PORT_ALL) != 0)
3144 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3146 /* set all ports to promiscuous mode by default */
3147 RTE_ETH_FOREACH_DEV(port_id)
3148 rte_eth_promiscuous_enable(port_id);
3150 /* Init metrics library */
3151 rte_metrics_init(rte_socket_id());
3153 #ifdef RTE_LIBRTE_LATENCY_STATS
3154 if (latencystats_enabled != 0) {
3155 int ret = rte_latencystats_init(1, NULL);
3157 printf("Warning: latencystats init()"
3158 " returned error %d\n", ret);
3159 printf("Latencystats running on lcore %d\n",
3160 latencystats_lcore_id);
3164 /* Setup bitrate stats */
3165 #ifdef RTE_LIBRTE_BITRATE
3166 if (bitrate_enabled != 0) {
3167 bitrate_data = rte_stats_bitrate_create();
3168 if (bitrate_data == NULL)
3169 rte_exit(EXIT_FAILURE,
3170 "Could not allocate bitrate data.\n");
3171 rte_stats_bitrate_reg(bitrate_data);
3175 #ifdef RTE_LIBRTE_CMDLINE
3176 if (strlen(cmdline_filename) != 0)
3177 cmdline_read_from_file(cmdline_filename);
3179 if (interactive == 1) {
3181 printf("Start automatic packet forwarding\n");
3182 start_packet_forwarding(0);
3194 printf("No commandline core given, start packet forwarding\n");
3195 start_packet_forwarding(tx_first);
3196 if (stats_period != 0) {
3197 uint64_t prev_time = 0, cur_time, diff_time = 0;
3198 uint64_t timer_period;
3200 /* Convert to number of cycles */
3201 timer_period = stats_period * rte_get_timer_hz();
3203 while (f_quit == 0) {
3204 cur_time = rte_get_timer_cycles();
3205 diff_time += cur_time - prev_time;
3207 if (diff_time >= timer_period) {
3209 /* Reset the timer */
3212 /* Sleep to avoid unnecessary checks */
3213 prev_time = cur_time;
3218 printf("Press enter to exit\n");
3219 rc = read(0, &c, 1);
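/*
 * Hedged sketch, not part of the original code: the cycle-based periodic
 * pattern used by the stats loop above, as a stand-alone helper.
 * show_stats() is a hypothetical callback.
 */
#ifdef TESTPMD_PERIOD_EXAMPLE
static void
run_periodic(unsigned int period_s, volatile uint8_t *quit,
		void (*show_stats)(void))
{
	uint64_t timer_period = period_s * rte_get_timer_hz();
	uint64_t prev = rte_get_timer_cycles();

	while (*quit == 0) {
		uint64_t cur = rte_get_timer_cycles();

		if (cur - prev >= timer_period) {
			show_stats();
			prev = cur; /* reset the timer */
		}
		rte_delay_ms(50); /* avoid busy spinning */
	}
}
#endif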