1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc_heap.h>
31 #include <rte_memory.h>
32 #include <rte_memcpy.h>
33 #include <rte_launch.h>
35 #include <rte_alarm.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_atomic.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
50 #ifdef RTE_LIBRTE_IXGBE_PMD
51 #include <rte_pmd_ixgbe.h>
53 #ifdef RTE_LIBRTE_PDUMP
54 #include <rte_pdump.h>
57 #include <rte_metrics.h>
58 #ifdef RTE_LIBRTE_BITRATE
59 #include <rte_bitrate.h>
61 #ifdef RTE_LIBRTE_LATENCY_STATS
62 #include <rte_latencystats.h>
68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69 #define HUGE_FLAG (0x40000)
71 #define HUGE_FLAG MAP_HUGETLB
74 #ifndef MAP_HUGE_SHIFT
75 /* older kernels (or FreeBSD) will not have this define */
76 #define HUGE_SHIFT (26)
78 #define HUGE_SHIFT MAP_HUGE_SHIFT
81 #define EXTMEM_HEAP_NAME "extmem"
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* use master core for command line ? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, the NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports among the CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
102 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
117 * Store specified sockets on which memory pool to be used by ports
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123 * Store specified sockets on which RX ring to be used by ports
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129 * Store specified sockets on which TX ring to be used by ports
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet address of peer target ports to which packets are
137 * Must be instantiated with the ethernet addresses of peer traffic generator
140 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
182 #if defined RTE_LIBRTE_PMD_SOFTNIC
185 #ifdef RTE_LIBRTE_IEEE1588
186 &ieee1588_fwd_engine,
191 struct fwd_config cur_fwd_config;
192 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193 uint32_t retry_enabled;
194 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
197 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
199 * specified on command-line. */
200 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
203 * In container, it cannot terminate the process which running with 'stats-period'
204 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
209 * Configuration of packet segments used by the "txonly" processing engine.
211 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213 TXONLY_DEF_PACKET_LEN,
215 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
217 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
218 /**< Split policy for packets to TX. */
220 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
221 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
223 /* current configuration is in DCB or not,0 means it is not in DCB mode */
224 uint8_t dcb_config = 0;
226 /* Whether the dcb is in testing status */
227 uint8_t dcb_test = 0;
230 * Configurable number of RX/TX queues.
232 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
233 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
236 * Configurable number of RX/TX ring descriptors.
237 * Defaults are supplied by drivers via ethdev.
239 #define RTE_TEST_RX_DESC_DEFAULT 0
240 #define RTE_TEST_TX_DESC_DEFAULT 0
241 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
242 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
244 #define RTE_PMD_PARAM_UNSET -1
246 * Configurable values of RX and TX ring threshold registers.
249 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
250 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
251 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
253 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
254 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
255 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
258 * Configurable value of RX free threshold.
260 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
263 * Configurable value of RX drop enable.
265 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
268 * Configurable value of TX free threshold.
270 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
273 * Configurable value of TX RS bit threshold.
275 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
278 * Configurable value of buffered packets before sending.
280 uint16_t noisy_tx_sw_bufsz;
283 * Configurable value of packet buffer timeout.
285 uint16_t noisy_tx_sw_buf_flush_time;
288 * Configurable value for size of VNF internal memory area
289 * used for simulating noisy neighbour behaviour
291 uint64_t noisy_lkup_mem_sz;
294 * Configurable value of number of random writes done in
295 * VNF simulation memory area.
297 uint64_t noisy_lkup_num_writes;
300 * Configurable value of number of random reads done in
301 * VNF simulation memory area.
303 uint64_t noisy_lkup_num_reads;
306 * Configurable value of number of random reads/writes done in
307 * VNF simulation memory area.
309 uint64_t noisy_lkup_num_reads_writes;
312 * Receive Side Scaling (RSS) configuration.
314 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
317 * Port topology configuration
319 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
322 * Avoids to flush all the RX streams before starts forwarding.
324 uint8_t no_flush_rx = 0; /* flush by default */
327 * Flow API isolated mode.
329 uint8_t flow_isolate_all;
332 * Avoids to check link status when starting/stopping a port.
334 uint8_t no_link_check = 0; /* check by default */
337 * Enable link status change notification
339 uint8_t lsc_interrupt = 1; /* enabled by default */
342 * Enable device removal notification.
344 uint8_t rmv_interrupt = 1; /* enabled by default */
346 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
349 * Display or mask ether events
350 * Default to all events except VF_MBOX
352 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
353 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
354 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
355 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
356 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
357 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
358 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
360 * Decide if all memory are locked for performance.
365 * NIC bypass mode configuration options.
368 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
369 /* The NIC bypass watchdog timeout. */
370 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
374 #ifdef RTE_LIBRTE_LATENCY_STATS
377 * Set when latency stats is enabled in the commandline
379 uint8_t latencystats_enabled;
382 * Lcore ID to serive latency statistics.
384 lcoreid_t latencystats_lcore_id = -1;
389 * Ethernet device configuration.
391 struct rte_eth_rxmode rx_mode = {
392 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
395 struct rte_eth_txmode tx_mode = {
396 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
399 struct rte_fdir_conf fdir_conf = {
400 .mode = RTE_FDIR_MODE_NONE,
401 .pballoc = RTE_FDIR_PBALLOC_64K,
402 .status = RTE_FDIR_REPORT_STATUS,
404 .vlan_tci_mask = 0xFFEF,
406 .src_ip = 0xFFFFFFFF,
407 .dst_ip = 0xFFFFFFFF,
410 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
411 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
413 .src_port_mask = 0xFFFF,
414 .dst_port_mask = 0xFFFF,
415 .mac_addr_byte_mask = 0xFF,
416 .tunnel_type_mask = 1,
417 .tunnel_id_mask = 0xFFFFFFFF,
422 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
424 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
425 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
427 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
428 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
430 uint16_t nb_tx_queue_stats_mappings = 0;
431 uint16_t nb_rx_queue_stats_mappings = 0;
434 * Display zero values by default for xstats
436 uint8_t xstats_hide_zero;
438 unsigned int num_sockets = 0;
439 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
441 #ifdef RTE_LIBRTE_BITRATE
442 /* Bitrate statistics */
443 struct rte_stats_bitrates *bitrate_data;
444 lcoreid_t bitrate_lcore_id;
445 uint8_t bitrate_enabled;
448 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
449 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
451 struct vxlan_encap_conf vxlan_encap_conf = {
454 .vni = "\x00\x00\x00",
456 .udp_dst = RTE_BE16(4789),
457 .ipv4_src = IPv4(127, 0, 0, 1),
458 .ipv4_dst = IPv4(255, 255, 255, 255),
459 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
460 "\x00\x00\x00\x00\x00\x00\x00\x01",
461 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
462 "\x00\x00\x00\x00\x00\x00\x11\x11",
464 .eth_src = "\x00\x00\x00\x00\x00\x00",
465 .eth_dst = "\xff\xff\xff\xff\xff\xff",
468 struct nvgre_encap_conf nvgre_encap_conf = {
471 .tni = "\x00\x00\x00",
472 .ipv4_src = IPv4(127, 0, 0, 1),
473 .ipv4_dst = IPv4(255, 255, 255, 255),
474 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
475 "\x00\x00\x00\x00\x00\x00\x00\x01",
476 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
477 "\x00\x00\x00\x00\x00\x00\x11\x11",
479 .eth_src = "\x00\x00\x00\x00\x00\x00",
480 .eth_dst = "\xff\xff\xff\xff\xff\xff",
483 /* Forward function declarations */
484 static void setup_attached_port(portid_t pi);
485 static void map_port_queue_stats_mapping_registers(portid_t pi,
486 struct rte_port *port);
487 static void check_all_ports_link_status(uint32_t port_mask);
488 static int eth_event_callback(portid_t port_id,
489 enum rte_eth_event_type type,
490 void *param, void *ret_param);
491 static void eth_dev_event_callback(const char *device_name,
492 enum rte_dev_event_type type,
496 * Check if all the ports are started.
497 * If yes, return positive value. If not, return zero.
499 static int all_ports_started(void);
501 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
502 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
505 * Helper function to check if socket is already discovered.
506 * If yes, return positive value. If not, return zero.
/*
 * NOTE(review): this chunk is an elided sample of the file — the return
 * type line, braces and return statements of this function are not
 * visible here; visible code kept byte-identical.
 */
509 new_socket_id(unsigned int socket_id)
/* linear scan of the sockets discovered so far (socket_ids/num_sockets) */
513 for (i = 0; i < num_sockets; i++) {
514 if (socket_ids[i] == socket_id)
521 * Setup default configuration.
/*
 * NOTE(review): elided chunk — declarations, braces and continue
 * statements of this function are missing from view; visible code kept
 * byte-identical. From what is visible: builds the default forwarding
 * lcore list by recording each enabled lcore (except the master lcore)
 * into fwd_lcores_cpuids[], and registers every newly seen CPU socket
 * in socket_ids[].
 */
524 set_default_fwd_lcores_config(void)
528 unsigned int sock_num;
531 for (i = 0; i < RTE_MAX_LCORE; i++) {
532 if (!rte_lcore_is_enabled(i))
534 sock_num = rte_lcore_to_socket_id(i);
/* first time this socket is seen: record it, abort if the table is full */
535 if (new_socket_id(sock_num)) {
536 if (num_sockets >= RTE_MAX_NUMA_NODES) {
537 rte_exit(EXIT_FAILURE,
538 "Total sockets greater than %u\n",
541 socket_ids[num_sockets++] = sock_num;
/* the master lcore is excluded from forwarding (runs the command line) */
543 if (i == rte_get_master_lcore())
545 fwd_lcores_cpuids[nb_lc++] = i;
547 nb_lcores = (lcoreid_t) nb_lc;
548 nb_cfg_lcores = nb_lcores;
/*
 * NOTE(review): elided chunk — return type/braces not visible; code kept
 * byte-identical. Fills peer_eth_addrs[] with locally administered MAC
 * addresses whose last byte is the port index.
 */
553 set_def_peer_eth_addrs(void)
557 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
558 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
559 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * NOTE(review): elided chunk — several interior lines missing; code kept
 * byte-identical. Records every probed ethdev port id into
 * fwd_ports_ids[] and registers each port's NUMA socket in socket_ids[],
 * then makes all probed ports both configured and forwarding ports.
 */
564 set_default_fwd_ports_config(void)
569 RTE_ETH_FOREACH_DEV(pt_id) {
570 fwd_ports_ids[i++] = pt_id;
572 /* Update sockets info according to the attached device */
573 int socket_id = rte_eth_dev_socket_id(pt_id);
/* negative socket_id means the device has no NUMA affinity — skip it */
574 if (socket_id >= 0 && new_socket_id(socket_id)) {
575 if (num_sockets >= RTE_MAX_NUMA_NODES) {
576 rte_exit(EXIT_FAILURE,
577 "Total sockets greater than %u\n",
580 socket_ids[num_sockets++] = socket_id;
584 nb_cfg_ports = nb_ports;
585 nb_fwd_ports = nb_ports;
/*
 * Apply the full default forwarding configuration: lcores, peer MAC
 * addresses, and ports (in that order).
 * NOTE(review): elided chunk — return type/braces not visible; code kept
 * byte-identical.
 */
589 set_def_fwd_config(void)
591 set_default_fwd_lcores_config();
592 set_def_peer_eth_addrs();
593 set_default_fwd_ports_config();
596 /* extremely pessimistic estimation of memory required to create a mempool */
/*
 * NOTE(review): elided chunk — return type, braces, early returns and the
 * final "return 0" are not visible; code kept byte-identical. Visible
 * logic: computes, into *out, a page-aligned upper bound for the memory
 * needed to hold nb_mbufs objects of mbuf_sz bytes on pages of pgsz
 * bytes, plus a flat 128MB allowance for mempool header chunks.
 */
598 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
600 unsigned int n_pages, mbuf_per_pg, leftover;
601 uint64_t total_mem, mbuf_mem, obj_sz;
603 /* there is no good way to predict how much space the mempool will
604 * occupy because it will allocate chunks on the fly, and some of those
605 * will come from default DPDK memory while some will come from our
606 * external memory, so just assume 128MB will be enough for everyone.
608 uint64_t hdr_mem = 128 << 20;
610 /* account for possible non-contiguousness */
611 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
613 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
/* objects never straddle a page: round the page count up */
617 mbuf_per_pg = pgsz / obj_sz;
618 leftover = (nb_mbufs % mbuf_per_pg) > 0;
619 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
621 mbuf_mem = n_pages * pgsz;
623 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
/* guard the uint64_t -> size_t narrowing on 32-bit builds */
625 if (total_mem > SIZE_MAX) {
626 TESTPMD_LOG(ERR, "Memory size too big\n");
629 *out = (size_t)total_mem;
/*
 * NOTE(review): two truncated inline helpers follow — their names and
 * bodies are mostly lost to elision. From the visible fragments: the
 * first returns the count of trailing zero bits of a 64-bit value
 * (__builtin_ctzll), the second rounds its argument up to a power of two
 * (rte_align64pow2) — presumably computing log2 of a u64; confirm
 * against the full source.
 */
634 static inline uint32_t
637 return (uint32_t)__builtin_ctzll(v);
640 static inline uint32_t
645 v = rte_align64pow2(v);
/*
 * Translate a page size into the mmap() flag bits that request that
 * specific hugepage size (log2 of the size shifted by MAP_HUGE_SHIFT,
 * or the local HUGE_SHIFT fallback defined above).
 * NOTE(review): elided chunk — return type/braces not visible; code kept
 * byte-identical.
 */
650 pagesz_flags(uint64_t page_sz)
652 /* as per mmap() manpage, all page sizes are log2 of page size
653 * shifted by MAP_HUGE_SHIFT
655 int log2 = log2_u64(page_sz);
657 return (log2 << HUGE_SHIFT);
/*
 * mmap() an anonymous private mapping of memsz bytes; when huge is true,
 * request hugepages of size pgsz via HUGE_FLAG plus the size-encoding
 * flags from pagesz_flags().
 * NOTE(review): elided chunk — return type, the huge-page conditional and
 * the success/failure returns are not visible; code kept byte-identical.
 */
661 alloc_mem(size_t memsz, size_t pgsz, bool huge)
666 /* allocate anonymous hugepages */
667 flags = MAP_ANONYMOUS | MAP_PRIVATE;
669 flags |= HUGE_FLAG | pagesz_flags(pgsz);
671 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
672 if (addr == MAP_FAILED)
/*
 * Result of create_extmem(): describes an externally allocated memory
 * area to be registered with the "extmem" malloc heap.
 * NOTE(review): elided chunk — the addr/len/pgsz members visible in use
 * below are not shown in this struct fragment.
 */
678 struct extmem_param {
682 rte_iova_t *iova_table;              /* per-page IOVA addresses */
683 unsigned int iova_table_len;         /* number of entries (pages) */
/*
 * Allocate an external memory area big enough for nb_mbufs objects of
 * mbuf_sz bytes and fill *param with its address, length, page size and
 * per-page IOVA table. Tries each supported page size in turn until one
 * allocation succeeds.
 * NOTE(review): elided chunk — braces, continue/break statements, the
 * mlock call and the error/success returns are missing from view; code
 * kept byte-identical.
 */
687 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
690 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
691 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
692 unsigned int cur_page, n_pages, pgsz_idx;
693 size_t mem_sz, cur_pgsz;
694 rte_iova_t *iovas = NULL;
698 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
699 /* skip anything that is too big */
700 if (pgsizes[pgsz_idx] > SIZE_MAX)
703 cur_pgsz = pgsizes[pgsz_idx];
705 /* if we were told not to allocate hugepages, override */
707 cur_pgsz = sysconf(_SC_PAGESIZE);
709 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
711 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
715 /* allocate our memory */
716 addr = alloc_mem(mem_sz, cur_pgsz, huge);
718 /* if we couldn't allocate memory with a specified page size,
719 * that doesn't mean we can't do it with other page sizes, so
725 /* store IOVA addresses for every page in this memory area */
726 n_pages = mem_sz / cur_pgsz;
728 iovas = malloc(sizeof(*iovas) * n_pages);
731 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
734 /* lock memory if it's not huge pages */
738 /* populate IOVA addresses */
739 for (cur_page = 0; cur_page < n_pages; cur_page++) {
744 offset = cur_pgsz * cur_page;
745 cur = RTE_PTR_ADD(addr, offset);
747 /* touch the page before getting its IOVA */
748 *(volatile char *)cur = 0;
750 iova = rte_mem_virt2iova(cur);
752 iovas[cur_page] = iova;
757 /* if we couldn't allocate anything */
763 param->pgsz = cur_pgsz;
764 param->iova_table = iovas;
765 param->iova_table_len = n_pages;
/* error path: unmap the area allocated in this iteration */
772 munmap(addr, mem_sz);
/*
 * Create (if needed) the "extmem" malloc heap and register an externally
 * allocated memory area with it, so mempools can later be created on
 * that heap's socket.
 * NOTE(review): elided chunk — braces, returns and the DMA-map branch are
 * missing from view; code kept byte-identical. Also note lines 783 and
 * 796 below contain the mojibake sequence "¶m" where the original
 * source reads "&param" (HTML-entity corruption of '&para'); left
 * untouched here because it is runtime code — fix against upstream.
 */
778 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
780 struct extmem_param param;
783 memset(¶m, 0, sizeof(param));
785 /* check if our heap exists */
786 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
788 /* create our heap */
789 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
791 TESTPMD_LOG(ERR, "Cannot create heap\n");
796 ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge);
798 TESTPMD_LOG(ERR, "Cannot create memory area\n");
802 /* we now have a valid memory area, so add it to heap */
803 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
804 param.addr, param.len, param.iova_table,
805 param.iova_table_len, param.pgsz);
807 /* when using VFIO, memory is automatically mapped for DMA by EAL */
809 /* not needed any more */
810 free(param.iova_table);
813 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
814 munmap(param.addr, param.len);
820 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
827 * Configuration initialisation done once at init time.
/*
 * Create the mbuf pool for one socket, named via mbuf_poolname_build().
 * The allocation strategy follows the global mp_alloc_type:
 *   MP_ALLOC_NATIVE      - regular rte_pktmbuf_pool_create()
 *   anon (case elided)   - empty pool populated from anonymous memory
 *   MP_ALLOC_XMEM[_HUGE] - pool placed on the external "extmem" heap
 * Exits the process on failure (rte_exit).
 * NOTE(review): elided chunk — the return type, several case labels,
 * break statements and the final return are missing from view; code kept
 * byte-identical.
 */
830 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
831 unsigned int socket_id)
833 char pool_name[RTE_MEMPOOL_NAMESIZE];
834 struct rte_mempool *rte_mp = NULL;
/* total object size = mbuf header + data segment */
837 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
838 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
841 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
842 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
844 switch (mp_alloc_type) {
845 case MP_ALLOC_NATIVE:
847 /* wrapper to rte_mempool_create() */
848 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
849 rte_mbuf_best_mempool_ops());
850 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
851 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
/* anon path: build an empty pool, then back it with anonymous pages */
856 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
857 mb_size, (unsigned int) mb_mempool_cache,
858 sizeof(struct rte_pktmbuf_pool_private),
/* populate_anon() returning 0 means no objects were added: bail out */
863 if (rte_mempool_populate_anon(rte_mp) == 0) {
864 rte_mempool_free(rte_mp);
868 rte_pktmbuf_pool_init(rte_mp, NULL);
869 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
873 case MP_ALLOC_XMEM_HUGE:
876 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
878 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
879 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
882 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
884 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
886 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
887 rte_mbuf_best_mempool_ops());
888 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
889 mb_mempool_cache, 0, mbuf_seg_size,
895 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
900 if (rte_mp == NULL) {
901 rte_exit(EXIT_FAILURE,
902 "Creation of mbuf pool for socket %u failed: %s\n",
903 socket_id, rte_strerror(rte_errno));
904 } else if (verbose_level > 0) {
905 rte_mempool_dump(stdout, rte_mp);
910 * Check given socket id is valid or not with NUMA mode,
911 * if valid, return 0, else return -1
/*
 * NOTE(review): elided chunk — return statements and the rest of the
 * warning text are missing from view; code kept byte-identical. A socket
 * not yet in socket_ids[] is treated as invalid; the NUMA warning is
 * printed at most once (static warning_once latch).
 */
914 check_socket_id(const unsigned int socket_id)
916 static int warning_once = 0;
918 if (new_socket_id(socket_id)) {
919 if (!warning_once && numa_support)
920 printf("Warning: NUMA should be configured manually by"
921 " using --port-numa-config and"
922 " --ring-numa-config parameters along with"
931 * Get the allowed maximum number of RX queues.
932 * *pid return the port id which has minimal value of
933 * max_rx_queues in all ports.
/*
 * NOTE(review): elided chunk — braces and the *pid assignment inside the
 * loop are not visible; code kept byte-identical. Returns the smallest
 * max_rx_queues across all probed ports (MAX_QUEUE_ID if none).
 */
936 get_allowed_max_nb_rxq(portid_t *pid)
938 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
940 struct rte_eth_dev_info dev_info;
942 RTE_ETH_FOREACH_DEV(pi) {
943 rte_eth_dev_info_get(pi, &dev_info);
944 if (dev_info.max_rx_queues < allowed_max_rxq) {
945 allowed_max_rxq = dev_info.max_rx_queues;
949 return allowed_max_rxq;
953 * Check input rxq is valid or not.
954 * If input rxq is not greater than any of maximum number
955 * of RX queues of all ports, it is valid.
956 * if valid, return 0, else return -1
/*
 * NOTE(review): elided chunk — the printf arguments and the return
 * statements are missing from view; code kept byte-identical.
 */
959 check_nb_rxq(queueid_t rxq)
961 queueid_t allowed_max_rxq;
964 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
965 if (rxq > allowed_max_rxq) {
966 printf("Fail: input rxq (%u) can't be greater "
967 "than max_rx_queues (%u) of port %u\n",
977 * Get the allowed maximum number of TX queues.
978 * *pid return the port id which has minimal value of
979 * max_tx_queues in all ports.
/*
 * TX-side counterpart of get_allowed_max_nb_rxq() above.
 * NOTE(review): elided chunk — braces and the *pid assignment inside the
 * loop are not visible; code kept byte-identical.
 */
982 get_allowed_max_nb_txq(portid_t *pid)
984 queueid_t allowed_max_txq = MAX_QUEUE_ID;
986 struct rte_eth_dev_info dev_info;
988 RTE_ETH_FOREACH_DEV(pi) {
989 rte_eth_dev_info_get(pi, &dev_info);
990 if (dev_info.max_tx_queues < allowed_max_txq) {
991 allowed_max_txq = dev_info.max_tx_queues;
995 return allowed_max_txq;
999 * Check input txq is valid or not.
1000 * If input txq is not greater than any of maximum number
1001 * of TX queues of all ports, it is valid.
1002 * if valid, return 0, else return -1
/*
 * TX-side counterpart of check_nb_rxq() above.
 * NOTE(review): elided chunk — printf arguments and return statements
 * missing from view; code kept byte-identical.
 */
1005 check_nb_txq(queueid_t txq)
1007 queueid_t allowed_max_txq;
1010 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1011 if (txq > allowed_max_txq) {
1012 printf("Fail: input txq (%u) can't be greater "
1013 "than max_tx_queues (%u) of port %u\n",
1026 struct rte_port *port;
1027 struct rte_mempool *mbp;
1028 unsigned int nb_mbuf_per_pool;
1030 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1031 struct rte_gro_param gro_param;
1035 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
1037 /* Configuration of logical cores. */
1038 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1039 sizeof(struct fwd_lcore *) * nb_lcores,
1040 RTE_CACHE_LINE_SIZE);
1041 if (fwd_lcores == NULL) {
1042 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1043 "failed\n", nb_lcores);
1045 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1046 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1047 sizeof(struct fwd_lcore),
1048 RTE_CACHE_LINE_SIZE);
1049 if (fwd_lcores[lc_id] == NULL) {
1050 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1053 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1056 RTE_ETH_FOREACH_DEV(pid) {
1058 /* Apply default TxRx configuration for all ports */
1059 port->dev_conf.txmode = tx_mode;
1060 port->dev_conf.rxmode = rx_mode;
1061 rte_eth_dev_info_get(pid, &port->dev_info);
1063 if (!(port->dev_info.tx_offload_capa &
1064 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1065 port->dev_conf.txmode.offloads &=
1066 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1067 if (!(port->dev_info.tx_offload_capa &
1068 DEV_TX_OFFLOAD_MATCH_METADATA))
1069 port->dev_conf.txmode.offloads &=
1070 ~DEV_TX_OFFLOAD_MATCH_METADATA;
1072 if (port_numa[pid] != NUMA_NO_CONFIG)
1073 port_per_socket[port_numa[pid]]++;
1075 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1078 * if socket_id is invalid,
1079 * set to the first available socket.
1081 if (check_socket_id(socket_id) < 0)
1082 socket_id = socket_ids[0];
1083 port_per_socket[socket_id]++;
1087 /* Apply Rx offloads configuration */
1088 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1089 port->rx_conf[k].offloads =
1090 port->dev_conf.rxmode.offloads;
1091 /* Apply Tx offloads configuration */
1092 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1093 port->tx_conf[k].offloads =
1094 port->dev_conf.txmode.offloads;
1096 /* set flag to initialize port/queue */
1097 port->need_reconfig = 1;
1098 port->need_reconfig_queues = 1;
1099 port->tx_metadata = 0;
1103 * Create pools of mbuf.
1104 * If NUMA support is disabled, create a single pool of mbuf in
1105 * socket 0 memory by default.
1106 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1108 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1109 * nb_txd can be configured at run time.
1111 if (param_total_num_mbufs)
1112 nb_mbuf_per_pool = param_total_num_mbufs;
1114 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1115 (nb_lcores * mb_mempool_cache) +
1116 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1117 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1123 for (i = 0; i < num_sockets; i++)
1124 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1127 if (socket_num == UMA_NO_CONFIG)
1128 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
1130 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1136 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1137 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1139 * Records which Mbuf pool to use by each logical core, if needed.
1141 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1142 mbp = mbuf_pool_find(
1143 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1146 mbp = mbuf_pool_find(0);
1147 fwd_lcores[lc_id]->mbp = mbp;
1148 /* initialize GSO context */
1149 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1150 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1151 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1152 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
1154 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1157 /* Configuration of packet forwarding streams. */
1158 if (init_fwd_streams() < 0)
1159 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1163 /* create a gro context for each lcore */
1164 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1165 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1166 gro_param.max_item_per_flow = MAX_PKT_BURST;
1167 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1168 gro_param.socket_id = rte_lcore_to_socket_id(
1169 fwd_lcores_cpuids[lc_id]);
1170 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1171 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1172 rte_exit(EXIT_FAILURE,
1173 "rte_gro_ctx_create() failed\n");
1177 #if defined RTE_LIBRTE_PMD_SOFTNIC
1178 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1179 RTE_ETH_FOREACH_DEV(pid) {
1181 const char *driver = port->dev_info.driver_name;
1183 if (strcmp(driver, "net_softnic") == 0)
1184 port->softport.fwd_lcore_arg = fwd_lcores;
/*
 * Re-query device info for a (newly attached) port, pin it to the given
 * socket, and flag both port-level and queue-level reconfiguration so
 * the next start reprograms it.
 * NOTE(review): elided chunk — return type/braces not visible; code kept
 * byte-identical.
 */
1193 reconfig(portid_t new_port_id, unsigned socket_id)
1195 struct rte_port *port;
1197 /* Reconfiguration of Ethernet ports. */
1198 port = &ports[new_port_id];
1199 rte_eth_dev_info_get(new_port_id, &port->dev_info);
1201 /* set flag to initialize port/queue */
1202 port->need_reconfig = 1;
1203 port->need_reconfig_queues = 1;
1204 port->socket_id = socket_id;
/*
 * (Re)allocate the forwarding stream array, sized to
 * nb_ports * max(nb_rxq, nb_txq). Also validates the requested queue
 * counts against each port's capabilities and fixes up each port's
 * socket id (NUMA vs UMA mode).
 * NOTE(review): elided chunk — return type, braces, early returns and
 * the numa_support conditional structure are missing from view; code
 * kept byte-identical.
 */
1211 init_fwd_streams(void)
1214 struct rte_port *port;
1215 streamid_t sm_id, nb_fwd_streams_new;
1218 /* set socket id according to numa or not */
1219 RTE_ETH_FOREACH_DEV(pid) {
/* reject queue counts the device cannot support */
1221 if (nb_rxq > port->dev_info.max_rx_queues) {
1222 printf("Fail: nb_rxq(%d) is greater than "
1223 "max_rx_queues(%d)\n", nb_rxq,
1224 port->dev_info.max_rx_queues);
1227 if (nb_txq > port->dev_info.max_tx_queues) {
1228 printf("Fail: nb_txq(%d) is greater than "
1229 "max_tx_queues(%d)\n", nb_txq,
1230 port->dev_info.max_tx_queues);
/* explicit --port-numa-config wins over the device's own socket */
1234 if (port_numa[pid] != NUMA_NO_CONFIG)
1235 port->socket_id = port_numa[pid];
1237 port->socket_id = rte_eth_dev_socket_id(pid);
1240 * if socket_id is invalid,
1241 * set to the first available socket.
1243 if (check_socket_id(port->socket_id) < 0)
1244 port->socket_id = socket_ids[0];
/* UMA mode: single socket, either 0 or the user-chosen --socket-num */
1248 if (socket_num == UMA_NO_CONFIG)
1249 port->socket_id = 0;
1251 port->socket_id = socket_num;
1255 q = RTE_MAX(nb_rxq, nb_txq);
1257 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1260 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
/* nothing to do if the stream count is unchanged */
1261 if (nb_fwd_streams_new == nb_fwd_streams)
/* free the old stream array before reallocating */
1264 if (fwd_streams != NULL) {
1265 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1266 if (fwd_streams[sm_id] == NULL)
1268 rte_free(fwd_streams[sm_id]);
1269 fwd_streams[sm_id] = NULL;
1271 rte_free(fwd_streams)
1276 nb_fwd_streams = nb_fwd_streams_new;
1277 if (nb_fwd_streams) {
1278 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1279 sizeof(struct fwd_stream *) * nb_fwd_streams,
1280 RTE_CACHE_LINE_SIZE);
1281 if (fwd_streams == NULL)
1282 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1283 " (struct fwd_stream *)) failed\n",
1286 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1287 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1288 " struct fwd_stream", sizeof(struct fwd_stream),
1289 RTE_CACHE_LINE_SIZE);
1290 if (fwd_streams[sm_id] == NULL)
1291 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1292 "(struct fwd_stream) failed\n");
1299 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a one-line burst-size histogram summary for RX or TX: total
 * number of bursts, and the share of the two most frequent burst sizes
 * (everything else is lumped into "others").
 * NOTE(review): elided chunk — return type, braces, loop-variable
 * declarations, the total_burst initialization and several closing
 * printf/return lines are missing from view; code kept byte-identical.
 */
1301 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1303 unsigned int total_burst;
1304 unsigned int nb_burst;
1305 unsigned int burst_stats[3];
1306 uint16_t pktnb_stats[3];
1308 int burst_percent[3];
1311 * First compute the total number of packet bursts and the
1312 * two highest numbers of bursts of the same number of packets.
1315 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1316 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1317 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1318 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1321 total_burst += nb_burst;
/* maintain the top-2 burst sizes by frequency (insertion style) */
1322 if (nb_burst > burst_stats[0]) {
1323 burst_stats[1] = burst_stats[0];
1324 pktnb_stats[1] = pktnb_stats[0];
1325 burst_stats[0] = nb_burst;
1326 pktnb_stats[0] = nb_pkt;
1327 } else if (nb_burst > burst_stats[1]) {
1328 burst_stats[1] = nb_burst;
1329 pktnb_stats[1] = nb_pkt;
/* nothing recorded: nothing to print */
1332 if (total_burst == 0)
1334 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1335 printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1336 burst_percent[0], (int) pktnb_stats[0]);
1337 if (burst_stats[0] == total_burst) {
1341 if (burst_stats[0] + burst_stats[1] == total_burst) {
1342 printf(" + %d%% of %d pkts]\n",
1343 100 - burst_percent[0], pktnb_stats[1]);
1346 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1347 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1348 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1349 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1352 printf(" + %d%% of %d pkts + %d%% of others]\n",
1353 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1355 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1358 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
1360 struct rte_port *port;
1363 static const char *fwd_stats_border = "----------------------";
1365 port = &ports[port_id];
1366 printf("\n %s Forward statistics for port %-2d %s\n",
1367 fwd_stats_border, port_id, fwd_stats_border);
1369 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
1370 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1372 stats->ipackets, stats->imissed,
1373 (uint64_t) (stats->ipackets + stats->imissed));
1375 if (cur_fwd_eng == &csum_fwd_engine)
1376 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
1377 port->rx_bad_ip_csum, port->rx_bad_l4_csum,
1378 port->rx_bad_outer_l4_csum);
1379 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1380 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
1381 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
1384 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1386 stats->opackets, port->tx_dropped,
1387 (uint64_t) (stats->opackets + port->tx_dropped));
1390 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
1392 stats->ipackets, stats->imissed,
1393 (uint64_t) (stats->ipackets + stats->imissed));
1395 if (cur_fwd_eng == &csum_fwd_engine)
1396 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64" Bad-outer-l4csum: %-14"PRIu64"\n",
1397 port->rx_bad_ip_csum, port->rx_bad_l4_csum,
1398 port->rx_bad_outer_l4_csum);
1399 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1400 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
1401 printf(" RX-nombufs: %14"PRIu64"\n",
1405 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
1407 stats->opackets, port->tx_dropped,
1408 (uint64_t) (stats->opackets + port->tx_dropped));
1411 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1412 if (port->rx_stream)
1413 pkt_burst_stats_display("RX",
1414 &port->rx_stream->rx_burst_stats);
1415 if (port->tx_stream)
1416 pkt_burst_stats_display("TX",
1417 &port->tx_stream->tx_burst_stats);
1420 if (port->rx_queue_stats_mapping_enabled) {
1422 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1423 printf(" Stats reg %2d RX-packets:%14"PRIu64
1424 " RX-errors:%14"PRIu64
1425 " RX-bytes:%14"PRIu64"\n",
1426 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1430 if (port->tx_queue_stats_mapping_enabled) {
1431 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1432 printf(" Stats reg %2d TX-packets:%14"PRIu64
1433 " TX-bytes:%14"PRIu64"\n",
1434 i, stats->q_opackets[i], stats->q_obytes[i]);
1438 printf(" %s--------------------------------%s\n",
1439 fwd_stats_border, fwd_stats_border);
/*
 * Print the statistics of one forwarding stream (RX port/queue ->
 * TX port/queue pair). Streams with no activity are skipped.
 * NOTE(review): some lines are elided in this view.
 */
1443 fwd_stream_stats_display(streamid_t stream_id)
1445 struct fwd_stream *fs;
1446 static const char *fwd_top_stats_border = "-------";
1448 fs = fwd_streams[stream_id];
/* Nothing to report for a fully idle stream. */
1449 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1450 (fs->fwd_dropped == 0))
1452 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1453 "TX Port=%2d/Queue=%2d %s\n",
1454 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1455 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1456 printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1457 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1459 /* if checksum mode */
1460 if (cur_fwd_eng == &csum_fwd_engine) {
1461 printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1462 "%-14u Rx- bad outer L4 checksum: %-14u\n",
1463 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1464 fs->rx_bad_outer_l4_csum);
/* Optional per-burst spread statistics (compile-time feature). */
1467 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1468 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1469 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain (receive and free) any packets still sitting in the RX queues of
 * all forwarding ports before a new forwarding run starts, so stale
 * packets do not pollute the next run's statistics.
 * Two passes are made with a 10 ms pause between them.
 * NOTE(review): some lines are elided in this view.
 */
1474 flush_fwd_rx_queues(void)
1476 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1483 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1484 uint64_t timer_period;
1486 /* convert to number of cycles */
1487 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1489 for (j = 0; j < 2; j++) {
1490 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1491 for (rxq = 0; rxq < nb_rxq; rxq++) {
1492 port_id = fwd_ports_ids[rxp];
1494 * testpmd can stuck in the below do while loop
1495 * if rte_eth_rx_burst() always returns nonzero
1496 * packets. So timer is added to exit this loop
1497 * after 1sec timer expiry.
1499 prev_tsc = rte_rdtsc();
/* Keep receiving and freeing until the queue is empty or 1 s passes. */
1501 nb_rx = rte_eth_rx_burst(port_id, rxq,
1502 pkts_burst, MAX_PKT_BURST);
1503 for (i = 0; i < nb_rx; i++)
1504 rte_pktmbuf_free(pkts_burst[i]);
1506 cur_tsc = rte_rdtsc();
1507 diff_tsc = cur_tsc - prev_tsc;
1508 timer_tsc += diff_tsc;
1509 } while ((nb_rx > 0) &&
1510 (timer_tsc < timer_period));
1514 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main per-lcore forwarding loop: repeatedly apply the engine's
 * packet_fwd callback to every stream assigned to this lcore until
 * fc->stopped is set. Optionally updates bitrate and latency statistics
 * on the designated lcores (compile-time features).
 * NOTE(review): some lines are elided in this view.
 */
1519 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1521 struct fwd_stream **fsm;
1524 #ifdef RTE_LIBRTE_BITRATE
1525 uint64_t tics_per_1sec;
1526 uint64_t tics_datum;
1527 uint64_t tics_current;
1528 uint16_t i, cnt_ports;
1530 cnt_ports = nb_ports;
1531 tics_datum = rte_rdtsc();
1532 tics_per_1sec = rte_get_timer_hz();
/* Streams handled by this lcore start at fc->stream_idx. */
1534 fsm = &fwd_streams[fc->stream_idx];
1535 nb_fs = fc->stream_nb;
1537 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1538 (*pkt_fwd)(fsm[sm_id]);
1539 #ifdef RTE_LIBRTE_BITRATE
/* Bitrate stats are computed once per second on one dedicated lcore. */
1540 if (bitrate_enabled != 0 &&
1541 bitrate_lcore_id == rte_lcore_id()) {
1542 tics_current = rte_rdtsc();
1543 if (tics_current - tics_datum >= tics_per_1sec) {
1544 /* Periodic bitrate calculation */
1545 for (i = 0; i < cnt_ports; i++)
1546 rte_stats_bitrate_calc(bitrate_data,
1548 tics_datum = tics_current;
1552 #ifdef RTE_LIBRTE_LATENCY_STATS
1553 if (latencystats_enabled != 0 &&
1554 latencystats_lcore_id == rte_lcore_id())
1555 rte_latencystats_update();
1558 } while (! fc->stopped);
/*
 * lcore entry point: run the currently-configured forwarding engine's
 * packet_fwd loop on this core. fwd_arg is the core's struct fwd_lcore.
 */
1562 start_pkt_forward_on_core(void *fwd_arg)
1564 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1565 cur_fwd_config.fwd_eng->packet_fwd);
1570 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1571 * Used to start communication flows in network loopback test configurations.
1574 run_one_txonly_burst_on_core(void *fwd_arg)
1576 struct fwd_lcore *fwd_lc;
1577 struct fwd_lcore tmp_lcore;
1579 fwd_lc = (struct fwd_lcore *) fwd_arg;
/* Copy the lcore context and pre-set stopped=1 so the forwarding loop
 * in run_pkt_fwd_on_lcore() executes exactly one iteration (one burst). */
1580 tmp_lcore = *fwd_lc;
1581 tmp_lcore.stopped = 1;
1582 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1587 * Launch packet forwarding:
1588 * - Setup per-port forwarding context.
1589 * - launch logical cores with their forwarding configuration.
1592 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1594 port_fwd_begin_t port_fwd_begin;
/* Run the engine's optional per-port begin hook before launching cores. */
1599 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1600 if (port_fwd_begin != NULL) {
1601 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1602 (*port_fwd_begin)(fwd_ports_ids[i]);
1604 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1605 lc_id = fwd_lcores_cpuids[i];
/* In interactive mode the master core stays on the command line, so it
 * is not launched as a forwarding lcore. */
1606 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1607 fwd_lcores[i]->stopped = 0;
1608 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1609 fwd_lcores[i], lc_id);
1611 printf("launch lcore %u failed - diag=%d\n",
1618 * Launch packet forwarding configuration.
1621 start_packet_forwarding(int with_tx_first)
1623 port_fwd_begin_t port_fwd_begin;
1624 port_fwd_end_t port_fwd_end;
1625 struct rte_port *port;
/* Sanity checks: the selected engine must have usable RX/TX queues. */
1630 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1631 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1633 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1634 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1636 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1637 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1638 (!nb_rxq || !nb_txq))
1639 rte_exit(EXIT_FAILURE,
1640 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1641 cur_fwd_eng->fwd_mode_name);
1643 if (all_ports_started() == 0) {
1644 printf("Not all ports were started\n");
1647 if (test_done == 0) {
1648 printf("Packet forwarding already started\n");
/* DCB mode requires every forwarding port to be DCB-configured and
 * more than one forwarding core. */
1654 for (i = 0; i < nb_fwd_ports; i++) {
1655 pt_id = fwd_ports_ids[i];
1656 port = &ports[pt_id];
1657 if (!port->dcb_flag) {
1658 printf("In DCB mode, all forwarding ports must "
1659 "be configured in this mode.\n");
1663 if (nb_fwd_lcores == 1) {
1664 printf("In DCB mode,the nb forwarding cores "
1665 "should be larger than 1.\n");
/* Drain stale packets and show the configuration before starting. */
1674 flush_fwd_rx_queues();
1676 pkt_fwd_config_display(&cur_fwd_config);
1677 rxtx_config_display();
/* Snapshot current HW stats as the baseline and reset SW counters. */
1679 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1680 pt_id = fwd_ports_ids[i];
1681 port = &ports[pt_id];
1682 rte_eth_stats_get(pt_id, &port->stats);
1683 port->tx_dropped = 0;
1685 map_port_queue_stats_mapping_registers(pt_id, port);
1687 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1688 fwd_streams[sm_id]->rx_packets = 0;
1689 fwd_streams[sm_id]->tx_packets = 0;
1690 fwd_streams[sm_id]->fwd_dropped = 0;
1691 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1692 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1693 fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;
1695 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1696 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1697 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1698 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1699 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1701 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1702 fwd_streams[sm_id]->core_cycles = 0;
/* Optionally send with_tx_first TXONLY bursts before real forwarding,
 * to kick off flows in loopback setups. */
1705 if (with_tx_first) {
1706 port_fwd_begin = tx_only_engine.port_fwd_begin;
1707 if (port_fwd_begin != NULL) {
1708 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1709 (*port_fwd_begin)(fwd_ports_ids[i]);
1711 while (with_tx_first--) {
1712 launch_packet_forwarding(
1713 run_one_txonly_burst_on_core);
1714 rte_eal_mp_wait_lcore();
1716 port_fwd_end = tx_only_engine.port_fwd_end;
1717 if (port_fwd_end != NULL) {
1718 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1719 (*port_fwd_end)(fwd_ports_ids[i]);
1722 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop all forwarding lcores, aggregate per-stream statistics into the
 * per-port counters, and print per-port plus accumulated totals.
 * NOTE(review): many lines are elided in this view; comments describe
 * only what the visible code shows.
 */
1726 stop_packet_forwarding(void)
1728 struct rte_eth_stats stats;
1729 struct rte_port *port;
1730 port_fwd_end_t port_fwd_end;
1735 uint64_t total_recv;
1736 uint64_t total_xmit;
1737 uint64_t total_rx_dropped;
1738 uint64_t total_tx_dropped;
1739 uint64_t total_rx_nombuf;
1740 uint64_t tx_dropped;
1741 uint64_t rx_bad_ip_csum;
1742 uint64_t rx_bad_l4_csum;
1743 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1744 uint64_t fwd_cycles;
1747 static const char *acc_stats_border = "+++++++++++++++";
1750 printf("Packet forwarding not started\n");
/* Signal every forwarding lcore to exit its loop, then wait for them. */
1753 printf("Telling cores to stop...");
1754 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1755 fwd_lcores[lc_id]->stopped = 1;
1756 printf("\nWaiting for lcores to finish...\n");
1757 rte_eal_mp_wait_lcore();
/* Run the engine's optional per-port end hook. */
1758 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1759 if (port_fwd_end != NULL) {
1760 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1761 pt_id = fwd_ports_ids[i];
1762 (*port_fwd_end)(pt_id);
1765 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Fold each stream's counters into its RX/TX ports' accumulators. */
1768 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1769 if (cur_fwd_config.nb_fwd_streams >
1770 cur_fwd_config.nb_fwd_ports) {
1771 fwd_stream_stats_display(sm_id);
1772 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1773 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1775 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1777 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1780 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1781 tx_dropped = (uint64_t) (tx_dropped +
1782 fwd_streams[sm_id]->fwd_dropped);
1783 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1786 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1787 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1788 fwd_streams[sm_id]->rx_bad_ip_csum);
1789 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1793 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1794 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1795 fwd_streams[sm_id]->rx_bad_l4_csum);
1796 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1799 ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
1800 fwd_streams[sm_id]->rx_bad_outer_l4_csum;
1802 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1803 fwd_cycles = (uint64_t) (fwd_cycles +
1804 fwd_streams[sm_id]->core_cycles);
1809 total_rx_dropped = 0;
1810 total_tx_dropped = 0;
1811 total_rx_nombuf = 0;
/* Per port: subtract the baseline snapshot taken at forwarding start
 * so only this run's deltas are reported, then accumulate totals. */
1812 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1813 pt_id = fwd_ports_ids[i];
1815 port = &ports[pt_id];
1816 rte_eth_stats_get(pt_id, &stats);
1817 stats.ipackets -= port->stats.ipackets;
1818 port->stats.ipackets = 0;
1819 stats.opackets -= port->stats.opackets;
1820 port->stats.opackets = 0;
1821 stats.ibytes -= port->stats.ibytes;
1822 port->stats.ibytes = 0;
1823 stats.obytes -= port->stats.obytes;
1824 port->stats.obytes = 0;
1825 stats.imissed -= port->stats.imissed;
1826 port->stats.imissed = 0;
1827 stats.oerrors -= port->stats.oerrors;
1828 port->stats.oerrors = 0;
1829 stats.rx_nombuf -= port->stats.rx_nombuf;
1830 port->stats.rx_nombuf = 0;
1832 total_recv += stats.ipackets;
1833 total_xmit += stats.opackets;
1834 total_rx_dropped += stats.imissed;
1835 total_tx_dropped += port->tx_dropped;
1836 total_rx_nombuf += stats.rx_nombuf;
1838 fwd_port_stats_display(pt_id, &stats);
1841 printf("\n  %s Accumulated forward statistics for all ports"
1843 acc_stats_border, acc_stats_border);
1844 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1846 "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1848 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1849 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1850 if (total_rx_nombuf > 0)
1851 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1852 printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1854 acc_stats_border, acc_stats_border);
1855 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1857 printf("\n  CPU cycles/packet=%u (total cycles="
1858 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1859 (unsigned int)(fwd_cycles / total_recv),
1860 fwd_cycles, total_recv);
1862 printf("\nDone.\n");
/* Bring the given port's link administratively up; print on failure. */
1867 dev_set_link_up(portid_t pid)
1869 if (rte_eth_dev_set_link_up(pid) < 0)
1870 printf("\nSet link up fail.\n");
/* Bring the given port's link administratively down; print on failure. */
1874 dev_set_link_down(portid_t pid)
1876 if (rte_eth_dev_set_link_down(pid) < 0)
1877 printf("\nSet link down fail.\n");
/*
 * Port state predicates. Bonding slave ports (slave_flag set) are
 * exempt from the started/stopped checks since they are controlled by
 * their bonding master.
 */
1881 all_ports_started(void)
1884 struct rte_port *port;
1886 RTE_ETH_FOREACH_DEV(pi) {
1888 /* Check if there is a port which is not started */
1889 if ((port->port_status != RTE_PORT_STARTED) &&
1890 (port->slave_flag == 0))
1894 /* No port is not started */
/* True when the given (non-slave) port is in the STOPPED state. */
1899 port_is_stopped(portid_t port_id)
1901 struct rte_port *port = &ports[port_id];
1903 if ((port->port_status != RTE_PORT_STOPPED) &&
1904 (port->slave_flag == 0))
/* True when every known ethdev port is stopped. */
1910 all_ports_stopped(void)
1914 RTE_ETH_FOREACH_DEV(pi) {
1915 if (!port_is_stopped(pi))
/* True when the given (valid) port is in the STARTED state. */
1923 port_is_started(portid_t port_id)
1925 if (port_id_is_invalid(port_id, ENABLED_WARN))
1928 if (ports[port_id].port_status != RTE_PORT_STARTED)
/*
 * Start one port, or all ports when pid == RTE_PORT_ALL:
 * reconfigure the device and its RX/TX queues if flagged, start the
 * device, register ethdev event callbacks, and finally check link
 * status. Port state transitions are guarded with atomic compare-sets
 * on port->port_status.
 * NOTE(review): many lines are elided in this view.
 */
1935 start_port(portid_t pid)
1937 int diag, need_check_link_status = -1;
1940 struct rte_port *port;
1941 struct ether_addr mac_addr;
1942 enum rte_eth_event_type event_type;
1944 if (port_id_is_invalid(pid, ENABLED_WARN))
1949 RTE_ETH_FOREACH_DEV(pi) {
1950 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1953 need_check_link_status = 0;
/* Claim the port: STOPPED -> HANDLING, or skip if it is not stopped. */
1955 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1956 RTE_PORT_HANDLING) == 0) {
1957 printf("Port %d is now not stopped\n", pi);
1961 if (port->need_reconfig > 0) {
1962 port->need_reconfig = 0;
1964 if (flow_isolate_all) {
1965 int ret = port_flow_isolate(pi, 1);
1967 printf("Failed to apply isolated"
1968 " mode on port %d\n", pi);
1972 configure_rxtx_dump_callbacks(0);
1973 printf("Configuring Port %d (socket %u)\n", pi,
1975 /* configure port */
1976 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* On configure failure: restore STOPPED state and retry next time. */
1979 if (rte_atomic16_cmpset(&(port->port_status),
1980 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1981 printf("Port %d can not be set back "
1982 "to stopped\n", pi);
1983 printf("Fail to configure port %d\n", pi);
1984 /* try to reconfigure port next time */
1985 port->need_reconfig = 1;
1989 if (port->need_reconfig_queues > 0) {
1990 port->need_reconfig_queues = 0;
1991 /* setup tx queues */
1992 for (qi = 0; qi < nb_txq; qi++) {
/* Prefer the per-ring NUMA node when configured; otherwise the
 * branch below uses the port's own socket (elided here). */
1993 if ((numa_support) &&
1994 (txring_numa[pi] != NUMA_NO_CONFIG))
1995 diag = rte_eth_tx_queue_setup(pi, qi,
1996 port->nb_tx_desc[qi],
1998 &(port->tx_conf[qi]));
2000 diag = rte_eth_tx_queue_setup(pi, qi,
2001 port->nb_tx_desc[qi],
2003 &(port->tx_conf[qi]));
2008 /* Fail to setup tx queue, return */
2009 if (rte_atomic16_cmpset(&(port->port_status),
2011 RTE_PORT_STOPPED) == 0)
2012 printf("Port %d can not be set back "
2013 "to stopped\n", pi);
2014 printf("Fail to configure port %d tx queues\n",
2016 /* try to reconfigure queues next time */
2017 port->need_reconfig_queues = 1;
2020 for (qi = 0; qi < nb_rxq; qi++) {
2021 /* setup rx queues */
2022 if ((numa_support) &&
2023 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2024 struct rte_mempool * mp =
2025 mbuf_pool_find(rxring_numa[pi]);
2027 printf("Failed to setup RX queue:"
2028 "No mempool allocation"
2029 " on the socket %d\n",
2034 diag = rte_eth_rx_queue_setup(pi, qi,
2035 port->nb_rx_desc[qi],
2037 &(port->rx_conf[qi]),
2040 struct rte_mempool *mp =
2041 mbuf_pool_find(port->socket_id);
2043 printf("Failed to setup RX queue:"
2044 "No mempool allocation"
2045 " on the socket %d\n",
2049 diag = rte_eth_rx_queue_setup(pi, qi,
2050 port->nb_rx_desc[qi],
2052 &(port->rx_conf[qi]),
2058 /* Fail to setup rx queue, return */
2059 if (rte_atomic16_cmpset(&(port->port_status),
2061 RTE_PORT_STOPPED) == 0)
2062 printf("Port %d can not be set back "
2063 "to stopped\n", pi);
2064 printf("Fail to configure port %d rx queues\n",
2066 /* try to reconfigure queues next time */
2067 port->need_reconfig_queues = 1;
2071 configure_rxtx_dump_callbacks(verbose_level);
/* Start the device itself. */
2073 if (rte_eth_dev_start(pi) < 0) {
2074 printf("Fail to start port %d\n", pi);
2076 /* Fail to setup rx queue, return */
2077 if (rte_atomic16_cmpset(&(port->port_status),
2078 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2079 printf("Port %d can not be set back to "
/* Success: HANDLING -> STARTED. */
2084 if (rte_atomic16_cmpset(&(port->port_status),
2085 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2086 printf("Port %d can not be set into started\n", pi);
2088 rte_eth_macaddr_get(pi, &mac_addr);
2089 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2090 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2091 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2092 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2094 /* at least one port started, need checking link status */
2095 need_check_link_status = 1;
/* Register eth_event_callback for every ethdev event type on all ports. */
2098 for (event_type = RTE_ETH_EVENT_UNKNOWN;
2099 event_type < RTE_ETH_EVENT_MAX;
2101 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
2106 printf("Failed to setup even callback for event %d\n",
2112 if (need_check_link_status == 1 && !no_link_check)
2113 check_all_ports_link_status(RTE_PORT_ALL);
2114 else if (need_check_link_status == 0)
2115 printf("Please stop the ports first\n");
/*
 * Stop one port, or all ports when pid == RTE_PORT_ALL. Ports still in
 * the forwarding configuration or acting as bonding slaves are refused.
 * State transitions use atomic compare-sets on port->port_status.
 */
2122 stop_port(portid_t pid)
2125 struct rte_port *port;
2126 int need_check_link_status = 0;
2133 if (port_id_is_invalid(pid, ENABLED_WARN))
2136 printf("Stopping ports...\n");
2138 RTE_ETH_FOREACH_DEV(pi) {
2139 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2142 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2143 printf("Please remove port %d from forwarding configuration.\n", pi);
2147 if (port_is_bonding_slave(pi)) {
2148 printf("Please remove port %d from bonded device.\n", pi);
/* Claim the port: STARTED -> HANDLING; skip ports not started. */
2153 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2154 RTE_PORT_HANDLING) == 0)
2157 rte_eth_dev_stop(pi);
2159 if (rte_atomic16_cmpset(&(port->port_status),
2160 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2161 printf("Port %d can not be set into stopped\n", pi);
2162 need_check_link_status = 1;
2164 if (need_check_link_status && !no_link_check)
2165 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Compact a port-id array in place, dropping entries whose port id is
 * no longer valid, and update *total to the new count.
 */
2171 remove_invalid_ports_in(portid_t *array, portid_t *total)
2174 portid_t new_total = 0;
2176 for (i = 0; i < *total; i++)
2177 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2178 array[new_total] = array[i];
/* Purge detached/invalid ports from both the global and the
 * forwarding port lists, and keep nb_cfg_ports consistent. */
2185 remove_invalid_ports(void)
2187 remove_invalid_ports_in(ports_ids, &nb_ports);
2188 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2189 nb_cfg_ports = nb_fwd_ports;
/*
 * Close one port, or all ports when pid == RTE_PORT_ALL: flush its flow
 * rules, close the ethdev, and drop it from the valid-port lists.
 * Ports still forwarding or acting as bonding slaves are refused.
 */
2193 close_port(portid_t pid)
2196 struct rte_port *port;
2198 if (port_id_is_invalid(pid, ENABLED_WARN))
2201 printf("Closing ports...\n");
2203 RTE_ETH_FOREACH_DEV(pi) {
2204 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2207 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2208 printf("Please remove port %d from forwarding configuration.\n", pi);
2212 if (port_is_bonding_slave(pi)) {
2213 printf("Please remove port %d from bonded device.\n", pi);
/* CLOSED -> CLOSED compare-set is a cheap "already closed?" test. */
2218 if (rte_atomic16_cmpset(&(port->port_status),
2219 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2220 printf("Port %d is already closed\n", pi);
2224 if (rte_atomic16_cmpset(&(port->port_status),
2225 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2226 printf("Port %d is now not stopped\n", pi);
2230 if (port->flow_list)
2231 port_flow_flush(pi);
2232 rte_eth_dev_close(pi);
2234 remove_invalid_ports();
2236 if (rte_atomic16_cmpset(&(port->port_status),
2237 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2238 printf("Port %d cannot be set to closed\n", pi);
/*
 * Reset one port, or all ports when pid == RTE_PORT_ALL, via
 * rte_eth_dev_reset(). On success the port is flagged for full
 * reconfiguration (device + queues) at the next start.
 */
2245 reset_port(portid_t pid)
2249 struct rte_port *port;
2251 if (port_id_is_invalid(pid, ENABLED_WARN))
2254 printf("Resetting ports...\n");
2256 RTE_ETH_FOREACH_DEV(pi) {
2257 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2260 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2261 printf("Please remove port %d from forwarding "
2262 "configuration.\n", pi);
2266 if (port_is_bonding_slave(pi)) {
2267 printf("Please remove port %d from bonded device.\n",
2272 diag = rte_eth_dev_reset(pi);
2275 port->need_reconfig = 1;
2276 port->need_reconfig_queues = 1;
2278 printf("Failed to reset port %d. diag=%d\n", pi, diag);
/*
 * Hot-plug a new device given its identifier (e.g. a PCI address or
 * vdev name): probe it, then set up every ethdev port it creates.
 */
2286 attach_port(char *identifier)
2289 struct rte_dev_iterator iterator;
2291 printf("Attaching a new port...\n");
2293 if (identifier == NULL) {
2294 printf("Invalid parameters are specified\n");
2298 if (rte_dev_probe(identifier) != 0) {
2299 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
/* One probed device may expose several ethdev ports; set up each. */
2303 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator)
2304 setup_attached_port(pi);
/*
 * Finish bringing a freshly attached port into testpmd: pick a valid
 * NUMA socket, reconfigure, enable promiscuous mode, and register the
 * port in the global and forwarding port lists as STOPPED.
 */
2308 setup_attached_port(portid_t pi)
2310 unsigned int socket_id;
2312 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2313 /* if socket_id is invalid, set to the first available socket. */
2314 if (check_socket_id(socket_id) < 0)
2315 socket_id = socket_ids[0];
2316 reconfig(pi, socket_id);
2317 rte_eth_promiscuous_enable(pi);
2319 ports_ids[nb_ports++] = pi;
2320 fwd_ports_ids[nb_fwd_ports++] = pi;
2321 nb_cfg_ports = nb_fwd_ports;
2322 ports[pi].port_status = RTE_PORT_STOPPED;
2324 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Hot-unplug the rte_device backing the given port: flush flows if the
 * port was not closed, remove the device, then force-close every
 * sibling port that shared the same device and purge the port lists.
 * NOTE(review): some lines are elided in this view.
 */
2329 detach_port_device(portid_t port_id)
2331 struct rte_device *dev;
2334 printf("Removing a device...\n");
2336 dev = rte_eth_devices[port_id].device;
2338 printf("Device already removed\n");
2342 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2343 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2344 printf("Port not stopped\n");
2347 printf("Port was not closed\n");
2348 if (ports[port_id].flow_list)
2349 port_flow_flush(port_id);
2352 if (rte_dev_remove(dev) != 0) {
2353 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
/* All ports backed by the removed device must be invalidated. */
2357 for (sibling = 0; sibling < RTE_MAX_ETHPORTS; sibling++) {
2358 if (rte_eth_devices[sibling].device != dev)
2360 /* reset mapping between old ports and removed device */
2361 rte_eth_devices[sibling].device = NULL;
2362 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2363 /* sibling ports are forced to be closed */
2364 ports[sibling].port_status = RTE_PORT_CLOSED;
2365 printf("Port %u is closed\n", sibling);
2369 remove_invalid_ports();
2371 printf("Device of port %u is detached\n", port_id);
2372 printf("Now total ports is %d\n", nb_ports);
/*
 * NOTE(review): the enclosing function's signature line is elided in
 * this view — presumably this is testpmd's exit/cleanup routine (it
 * stops forwarding, shuts down ports, and tears down the hotplug event
 * machinery); confirm against the full file.
 */
2380 struct rte_device *device;
2385 stop_packet_forwarding();
2387 if (ports != NULL) {
2389 RTE_ETH_FOREACH_DEV(pt_id) {
2390 printf("\nShutting down port %d...\n", pt_id);
2396 * This is a workaround to fix a virtio-user issue that
2397 * requires to call clean-up routine to remove existing
2399 * This workaround valid only for testpmd, needs a fix
2400 * valid for all applications.
2401 * TODO: Implement proper resource cleanup
2403 device = rte_eth_devices[pt_id].device;
2404 if (device && !strcmp(device->driver->name, "net_virtio_user"))
2405 detach_port_device(pt_id);
/* Tear down the device hot-plug event machinery. */
2410 ret = rte_dev_event_monitor_stop();
2413 "fail to stop device event monitor.");
2417 ret = rte_dev_event_callback_unregister(NULL,
2418 eth_dev_event_callback, NULL);
2421 "fail to unregister device event callback.\n");
2425 ret = rte_dev_hotplug_handle_disable();
2428 "fail to disable hotplug handling.\n");
2433 printf("\nBye...\n");
/* A named test command: a name string bound to a no-argument handler. */
2436 typedef void (*cmd_func_t)(void);
2437 struct pmd_test_command {
2438 const char *cmd_name;
2439 cmd_func_t cmd_func;
/* Number of entries in the pmd_test_menu command table. */
2442 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2444 /* Check the link status of all ports in up to 9s, and print them finally */
2446 check_all_ports_link_status(uint32_t port_mask)
2448 #define CHECK_INTERVAL 100 /* 100ms */
2449 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2451 uint8_t count, all_ports_up, print_flag = 0;
2452 struct rte_eth_link link;
2454 printf("Checking link statuses...\n");
/* Poll every masked port until all links are up or the 9 s budget is
 * spent; statuses are printed only on the final pass (print_flag). */
2456 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2458 RTE_ETH_FOREACH_DEV(portid) {
2459 if ((port_mask & (1 << portid)) == 0)
2461 memset(&link, 0, sizeof(link));
2462 rte_eth_link_get_nowait(portid, &link);
2463 /* print link status if flag set */
2464 if (print_flag == 1) {
2465 if (link.link_status)
2467 "Port%d Link Up. speed %u Mbps- %s\n",
2468 portid, link.link_speed,
2469 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2470 ("full-duplex") : ("half-duplex\n"));
2472 printf("Port %d Link Down\n", portid);
2475 /* clear all_ports_up flag if any link down */
2476 if (link.link_status == ETH_LINK_DOWN) {
2481 /* after finally printing all link status, get out */
2482 if (print_flag == 1)
2485 if (all_ports_up == 0) {
2487 rte_delay_ms(CHECK_INTERVAL);
2490 /* set the print_flag if all ports up or timeout */
2491 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred handler for a device-removal (RMV) event, run from an EAL
 * alarm: stop forwarding if the removed port was in use, close and
 * detach the port (with link checks temporarily suppressed), then
 * restart forwarding if it had been stopped here.
 * NOTE(review): some lines are elided in this view.
 */
2501 rmv_event_callback(void *arg)
2503 int need_to_start = 0;
2504 int org_no_link_check = no_link_check;
2505 portid_t port_id = (intptr_t)arg;
2507 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2509 if (!test_done && port_is_forwarding(port_id)) {
2511 stop_packet_forwarding();
2515 no_link_check = org_no_link_check;
2516 close_port(port_id);
2517 detach_port_device(port_id);
2519 start_packet_forwarding(0);
2522 /* This function is used by the interrupt thread */
2524 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
/* Human-readable names indexed by rte_eth_event_type, used when the
 * event is selected for printing by event_print_mask. */
2527 static const char * const event_desc[] = {
2528 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2529 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2530 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2531 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2532 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2533 [RTE_ETH_EVENT_IPSEC] = "IPsec",
2534 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2535 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2536 [RTE_ETH_EVENT_NEW] = "device probed",
2537 [RTE_ETH_EVENT_DESTROY] = "device released",
2538 [RTE_ETH_EVENT_MAX] = NULL,
2541 RTE_SET_USED(param);
2542 RTE_SET_USED(ret_param);
2544 if (type >= RTE_ETH_EVENT_MAX) {
2545 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2546 port_id, __func__, type);
2548 } else if (event_print_mask & (UINT32_C(1) << type)) {
2549 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2554 if (port_id_is_invalid(port_id, DISABLED_WARN))
2558 case RTE_ETH_EVENT_INTR_RMV:
/* Defer removal handling by 100 ms: the actual detach must not run in
 * the interrupt thread context. */
2559 if (rte_eal_alarm_set(100000,
2560 rmv_event_callback, (void *)(intptr_t)port_id))
2561 fprintf(stderr, "Could not set up deferred device removal\n");
2569 /* This function is used by the interrupt thread */
2571 eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2572 __rte_unused void *arg)
2577 if (type >= RTE_DEV_EVENT_MAX) {
2578 fprintf(stderr, "%s called upon invalid event %d\n",
2584 case RTE_DEV_EVENT_REMOVE:
2585 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
/* Map the device name back to its port id, then reuse the RMV path. */
2587 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2589 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
2593 rmv_event_callback((void *)(intptr_t)port_id);
2595 case RTE_DEV_EVENT_ADD:
2596 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2598 /* TODO: After finish kernel driver binding,
2599 * begin to attach port.
/*
 * Program the TX queue -> stats-counter mapping registers of a port
 * from the global tx_queue_stats_mappings table; mark the port as
 * mapping-enabled when at least one mapping applied.
 * NOTE(review): some lines (error handling, return) are elided.
 */
2608 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2612 uint8_t mapping_found = 0;
2614 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2615 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2616 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2617 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2618 tx_queue_stats_mappings[i].queue_id,
2619 tx_queue_stats_mappings[i].stats_counter_id);
2626 port->tx_queue_stats_mapping_enabled = 1;
/* Same as above, for the RX direction. */
2631 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2635 uint8_t mapping_found = 0;
2637 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2638 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2639 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2640 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2641 rx_queue_stats_mappings[i].queue_id,
2642 rx_queue_stats_mappings[i].stats_counter_id);
2649 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue stats mappings to a port. -ENOTSUP from
 * the driver merely disables the feature; any other failure is fatal.
 */
2654 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2658 diag = set_tx_queue_stats_mapping_registers(pi, port);
2660 if (diag == -ENOTSUP) {
2661 port->tx_queue_stats_mapping_enabled = 0;
2662 printf("TX queue stats mapping not supported port id=%d\n", pi);
2665 rte_exit(EXIT_FAILURE,
2666 "set_tx_queue_stats_mapping_registers "
2667 "failed for port id=%d diag=%d\n",
2671 diag = set_rx_queue_stats_mapping_registers(pi, port);
2673 if (diag == -ENOTSUP) {
2674 port->rx_queue_stats_mapping_enabled = 0;
2675 printf("RX queue stats mapping not supported port id=%d\n", pi);
2678 rte_exit(EXIT_FAILURE,
2679 "set_rx_queue_stats_mapping_registers "
2680 "failed for port id=%d diag=%d\n",
/*
 * Initialize every RX and TX queue config of a port from the driver's
 * defaults (dev_info), then override any threshold/parameter the user
 * supplied on the command line (RTE_PMD_PARAM_UNSET marks "not given").
 */
2686 rxtx_port_config(struct rte_port *port)
2690 for (qid = 0; qid < nb_rxq; qid++) {
2691 port->rx_conf[qid] = port->dev_info.default_rxconf;
2693 /* Check if any Rx parameters have been passed */
2694 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2695 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2697 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2698 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2700 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2701 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2703 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2704 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2706 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2707 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2709 port->nb_rx_desc[qid] = nb_rxd;
2712 for (qid = 0; qid < nb_txq; qid++) {
2713 port->tx_conf[qid] = port->dev_info.default_txconf;
2715 /* Check if any Tx parameters have been passed */
2716 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2717 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2719 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2720 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2722 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2723 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2725 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2726 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2728 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2729 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2731 port->nb_tx_desc[qid] = nb_txd;
/*
 * Build the initial device configuration for every known port: fdir,
 * RSS hash functions (restricted to what the device supports), RX
 * multi-queue mode, queue configs, MAC address, stats mappings, and
 * LSC/RMV interrupt flags when the device advertises them.
 * NOTE(review): some lines are elided in this view.
 */
2736 init_port_config(void)
2739 struct rte_port *port;
2741 RTE_ETH_FOREACH_DEV(pid) {
2743 port->dev_conf.fdir_conf = fdir_conf;
2744 rte_eth_dev_info_get(pid, &port->dev_info);
/* Keep only RSS hash types the device actually supports. */
2746 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2747 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2748 rss_hf & port->dev_info.flow_type_rss_offloads;
2750 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2751 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* Non-DCB ports: enable RSS mq mode only if any hash type remains. */
2754 if (port->dcb_flag == 0) {
2755 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2756 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2758 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2761 rxtx_port_config(port);
2763 rte_eth_macaddr_get(pid, &port->eth_addr);
2765 map_port_queue_stats_mapping_registers(pid, port);
2766 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2767 rte_pmd_ixgbe_bypass_init(pid);
/* Enable link-state-change / removal interrupts only when both the
 * user asked for them and the device advertises support. */
2770 if (lsc_interrupt &&
2771 (rte_eth_devices[pid].data->dev_flags &
2772 RTE_ETH_DEV_INTR_LSC))
2773 port->dev_conf.intr_conf.lsc = 1;
2774 if (rmv_interrupt &&
2775 (rte_eth_devices[pid].data->dev_flags &
2776 RTE_ETH_DEV_INTR_RMV))
2777 port->dev_conf.intr_conf.rmv = 1;
/* Mark the given port as a bonding slave in testpmd's ports[] table. */
2781 void set_port_slave_flag(portid_t slave_pid)
2783 struct rte_port *port;
2785 port = &ports[slave_pid];
2786 port->slave_flag = 1;
/* Clear the bonding-slave mark for the given port in ports[]. */
2789 void clear_port_slave_flag(portid_t slave_pid)
2791 struct rte_port *port;
2793 port = &ports[slave_pid];
2794 port->slave_flag = 0;
/*
 * Report whether the port acts as a bonding slave: true when the ethdev
 * layer advertises RTE_ETH_DEV_BONDED_SLAVE or when testpmd's own
 * slave_flag was set via set_port_slave_flag().
 * (The return statements of both branches fall in lines not visible here.)
 */
2797 uint8_t port_is_bonding_slave(portid_t slave_pid)
2799 struct rte_port *port;
2801 port = &ports[slave_pid];
2802 if ((rte_eth_devices[slave_pid].data->dev_flags &
2803 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
/*
 * VLAN IDs 0..31, used both to fill the VMDQ+DCB pool map in
 * get_eth_dcb_conf() and to program the per-port VLAN filter table in
 * init_port_dcb_config().
 */
2808 const uint16_t vlan_tags[] = {
2809 0, 1, 2, 3, 4, 5, 6, 7,
2810 8, 9, 10, 11, 12, 13, 14, 15,
2811 16, 17, 18, 19, 20, 21, 22, 23,
2812 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill @eth_conf with a DCB configuration for port @pid.
 *
 * DCB_VT_ENABLED: program VMDQ+DCB on both RX and TX — each vlan_tags[]
 * entry is mapped to a pool, and each of the ETH_DCB_NUM_USER_PRIORITIES
 * user priorities is assigned a traffic class round-robin over @num_tcs.
 * Otherwise: plain DCB with RSS on RX (reusing the port's current RSS
 * hash configuration queried via rte_eth_dev_rss_hash_conf_get()) and
 * DCB on TX.
 */
2816 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2817 enum dcb_mode_enable dcb_mode,
2818 enum rte_eth_nb_tcs num_tcs,
2823 struct rte_eth_rss_conf rss_conf;
2826 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2827 * given above, and the number of traffic classes available for use.
2829 if (dcb_mode == DCB_VT_ENABLED) {
/* NOTE(review): "ð_conf" below is mojibake — an HTML-entity collapse
 * of "&eth_conf" ("&eth;" -> ð). Restore "&eth_conf->..." when fixing
 * the file's encoding. */
2830 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2831 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2832 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2833 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2835 /* VMDQ+DCB RX and TX configurations */
2836 vmdq_rx_conf->enable_default_pool = 0;
2837 vmdq_rx_conf->default_pool = 0;
/* 4 TCs -> 32 pools, otherwise (8 TCs) -> 16 pools, on both RX and TX. */
2838 vmdq_rx_conf->nb_queue_pools =
2839 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2840 vmdq_tx_conf->nb_queue_pools =
2841 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* One VLAN-to-pool mapping per pool, cycling through vlan_tags[]. */
2843 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2844 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2845 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2846 vmdq_rx_conf->pool_map[i].pools =
2847 1 << (i % vmdq_rx_conf->nb_queue_pools);
/* User priority -> traffic class, round-robin over num_tcs. */
2849 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2850 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2851 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2854 /* set DCB mode of RX and TX of multiple queues */
2855 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2856 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
/* Non-VT branch: plain DCB config structures.
 * NOTE(review): same "ð_conf" mojibake for "&eth_conf" below. */
2858 struct rte_eth_dcb_rx_conf *rx_conf =
2859 ð_conf->rx_adv_conf.dcb_rx_conf;
2860 struct rte_eth_dcb_tx_conf *tx_conf =
2861 ð_conf->tx_adv_conf.dcb_tx_conf;
/* Reuse the port's current RSS hash configuration (error check of rc
 * is on a line not visible in this excerpt). */
2863 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2867 rx_conf->nb_tcs = num_tcs;
2868 tx_conf->nb_tcs = num_tcs;
2870 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2871 rx_conf->dcb_tc[i] = i % num_tcs;
2872 tx_conf->dcb_tc[i] = i % num_tcs;
2875 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2876 eth_conf->rx_adv_conf.rss_conf = rss_conf;
2877 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
/* NOTE(review): the choice between PG|PFC and PG-only is guarded by a
 * condition not visible here — presumably the pfc_en parameter; verify. */
2881 eth_conf->dcb_capability_en =
2882 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2884 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * Reconfigure port @pid for DCB operation: build a DCB rte_eth_conf via
 * get_eth_dcb_conf(), re-run rte_eth_dev_configure(), adjust the global
 * nb_rxq/nb_txq to match the chosen mode, enable VLAN filtering and
 * install the vlan_tags[] filters, then mark the port as DCB-enabled.
 */
2890 init_port_dcb_config(portid_t pid,
2891 enum dcb_mode_enable dcb_mode,
2892 enum rte_eth_nb_tcs num_tcs,
2895 struct rte_eth_conf port_conf;
2896 struct rte_port *rte_port;
2900 rte_port = &ports[pid];
2902 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2903 /* Enter DCB configuration status */
/* Keep the port's current rx/tx mode settings as the baseline. */
2906 port_conf.rxmode = rte_port->dev_conf.rxmode;
2907 port_conf.txmode = rte_port->dev_conf.txmode;
2909 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2910 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2913 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2915 /* re-configure the device . */
/* NOTE(review): nb_rxq is passed for BOTH the rx and tx queue counts;
 * this is only harmless if nb_rxq == nb_txq at this point — the third
 * argument presumably should be nb_txq. Verify against callers. */
2916 rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
2918 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2920 /* If dev_info.vmdq_pool_base is greater than 0,
2921 * the queue id of vmdq pools is started after pf queues.
2923 if (dcb_mode == DCB_VT_ENABLED &&
2924 rte_port->dev_info.vmdq_pool_base > 0) {
2925 printf("VMDQ_DCB multi-queue mode is nonsensical"
2926 " for port %d.", pid);
2930 /* Assume the ports in testpmd have the same dcb capability
2931 * and has the same number of rxq and txq in dcb mode
2933 if (dcb_mode == DCB_VT_ENABLED) {
/* With VFs present, use the per-VF queue counts; otherwise the
 * device maxima (else line falls outside this excerpt). */
2934 if (rte_port->dev_info.max_vfs > 0) {
2935 nb_rxq = rte_port->dev_info.nb_rx_queues;
2936 nb_txq = rte_port->dev_info.nb_tx_queues;
2938 nb_rxq = rte_port->dev_info.max_rx_queues;
2939 nb_txq = rte_port->dev_info.max_tx_queues;
2942 /*if vt is disabled, use all pf queues */
2943 if (rte_port->dev_info.vmdq_pool_base == 0) {
2944 nb_rxq = rte_port->dev_info.max_rx_queues;
2945 nb_txq = rte_port->dev_info.max_tx_queues;
/* Otherwise one queue per traffic class. */
2947 nb_rxq = (queueid_t)num_tcs;
2948 nb_txq = (queueid_t)num_tcs;
2952 rx_free_thresh = 64;
/* Persist the freshly built DCB configuration on the port. */
2954 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2956 rxtx_port_config(rte_port);
2958 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
/* Install a VLAN filter for every tag used by the DCB pool map. */
2959 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2960 rx_vft_set(pid, vlan_tags[i], 1);
2962 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2963 map_port_queue_stats_mapping_registers(pid, rte_port);
2965 rte_port->dcb_flag = 1;
2973 /* Configuration of Ethernet ports. */
/* Allocate the zero-initialized, cache-aligned global ports[] array
 * (one struct rte_port per possible ethdev port); abort on OOM. */
2974 ports = rte_zmalloc("testpmd: ports",
2975 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2976 RTE_CACHE_LINE_SIZE);
2977 if (ports == NULL) {
2978 rte_exit(EXIT_FAILURE,
2979 "rte_zmalloc(%d struct rte_port) failed\n",
2983 /* Initialize ports NUMA structures */
/* Mark every per-port NUMA binding (port, rx ring, tx ring) as
 * "not configured" until command-line options say otherwise. */
2984 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2985 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2986 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
/* ANSI escape sequences: ESC[2J clears the screen, ESC[1;1H homes the
 * cursor to the top-left corner. */
3000 const char clr[] = { 27, '[', '2', 'J', '\0' };
3001 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3003 /* Clear screen and move to top left */
3004 printf("%s%s", clr, top_left);
3006 printf("\nPort statistics ====================================");
/* Dump NIC statistics for every port in the current forwarding config. */
3007 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3008 nic_stats_display(fwd_ports_ids[i]);
/*
 * SIGINT/SIGTERM handler: tear down optional subsystems (pdump capture,
 * latency stats), then re-raise the signal with the default disposition
 * so the process exits with the conventional signal status.
 */
3012 signal_handler(int signum)
3014 if (signum == SIGINT || signum == SIGTERM) {
3015 printf("\nSignal %d received, preparing to exit...\n",
3017 #ifdef RTE_LIBRTE_PDUMP
3018 /* uninitialize packet capture framework */
3021 #ifdef RTE_LIBRTE_LATENCY_STATS
3022 rte_latencystats_uninit();
3025 /* Set flag to indicate the force termination. */
3027 /* exit with the expected status */
/* Restore default handling and re-deliver the signal to ourselves so
 * the exit status reflects death-by-signal. */
3028 signal(signum, SIG_DFL);
3029 kill(getpid(), signum);
3034 main(int argc, char** argv)
3041 signal(SIGINT, signal_handler);
3042 signal(SIGTERM, signal_handler);
3044 diag = rte_eal_init(argc, argv);
3046 rte_panic("Cannot init EAL\n");
3048 testpmd_logtype = rte_log_register("testpmd");
3049 if (testpmd_logtype < 0)
3050 rte_panic("Cannot register log type");
3051 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3053 #ifdef RTE_LIBRTE_PDUMP
3054 /* initialize packet capture framework */
3055 rte_pdump_init(NULL);
3059 RTE_ETH_FOREACH_DEV(port_id) {
3060 ports_ids[count] = port_id;
3063 nb_ports = (portid_t) count;
3065 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3067 /* allocate port structures, and init them */
3070 set_def_fwd_config();
3072 rte_panic("Empty set of forwarding logical cores - check the "
3073 "core mask supplied in the command parameters\n");
3075 /* Bitrate/latency stats disabled by default */
3076 #ifdef RTE_LIBRTE_BITRATE
3077 bitrate_enabled = 0;
3079 #ifdef RTE_LIBRTE_LATENCY_STATS
3080 latencystats_enabled = 0;
3083 /* on FreeBSD, mlockall() is disabled by default */
3084 #ifdef RTE_EXEC_ENV_BSDAPP
3093 launch_args_parse(argc, argv);
3095 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3096 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3100 if (tx_first && interactive)
3101 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3102 "interactive mode.\n");
3104 if (tx_first && lsc_interrupt) {
3105 printf("Warning: lsc_interrupt needs to be off when "
3106 " using tx_first. Disabling.\n");
3110 if (!nb_rxq && !nb_txq)
3111 printf("Warning: Either rx or tx queues should be non-zero\n");
3113 if (nb_rxq > 1 && nb_rxq > nb_txq)
3114 printf("Warning: nb_rxq=%d enables RSS configuration, "
3115 "but nb_txq=%d will prevent to fully test it.\n",
3121 ret = rte_dev_hotplug_handle_enable();
3124 "fail to enable hotplug handling.");
3128 ret = rte_dev_event_monitor_start();
3131 "fail to start device event monitoring.");
3135 ret = rte_dev_event_callback_register(NULL,
3136 eth_dev_event_callback, NULL);
3139 "fail to register device event callback\n");
3144 if (start_port(RTE_PORT_ALL) != 0)
3145 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3147 /* set all ports to promiscuous mode by default */
3148 RTE_ETH_FOREACH_DEV(port_id)
3149 rte_eth_promiscuous_enable(port_id);
3151 /* Init metrics library */
3152 rte_metrics_init(rte_socket_id());
3154 #ifdef RTE_LIBRTE_LATENCY_STATS
3155 if (latencystats_enabled != 0) {
3156 int ret = rte_latencystats_init(1, NULL);
3158 printf("Warning: latencystats init()"
3159 " returned error %d\n", ret);
3160 printf("Latencystats running on lcore %d\n",
3161 latencystats_lcore_id);
3165 /* Setup bitrate stats */
3166 #ifdef RTE_LIBRTE_BITRATE
3167 if (bitrate_enabled != 0) {
3168 bitrate_data = rte_stats_bitrate_create();
3169 if (bitrate_data == NULL)
3170 rte_exit(EXIT_FAILURE,
3171 "Could not allocate bitrate data.\n");
3172 rte_stats_bitrate_reg(bitrate_data);
3176 #ifdef RTE_LIBRTE_CMDLINE
3177 if (strlen(cmdline_filename) != 0)
3178 cmdline_read_from_file(cmdline_filename);
3180 if (interactive == 1) {
3182 printf("Start automatic packet forwarding\n");
3183 start_packet_forwarding(0);
3195 printf("No commandline core given, start packet forwarding\n");
3196 start_packet_forwarding(tx_first);
3197 if (stats_period != 0) {
3198 uint64_t prev_time = 0, cur_time, diff_time = 0;
3199 uint64_t timer_period;
3201 /* Convert to number of cycles */
3202 timer_period = stats_period * rte_get_timer_hz();
3204 while (f_quit == 0) {
3205 cur_time = rte_get_timer_cycles();
3206 diff_time += cur_time - prev_time;
3208 if (diff_time >= timer_period) {
3210 /* Reset the timer */
3213 /* Sleep to avoid unnecessary checks */
3214 prev_time = cur_time;
3219 printf("Press enter to exit\n");
3220 rc = read(0, &c, 1);