/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
#include <sys/types.h>
#include <sys/queue.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_malloc_heap.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#define HUGE_FLAG MAP_HUGETLB
#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#define HUGE_SHIFT MAP_HUGE_SHIFT
#define EXTMEM_HEAP_NAME "extmem"
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */
/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
uint8_t numa_support = 1; /**< numa enabled by default */
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
uint8_t socket_num = UMA_NO_CONFIG;
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
 * Store the specified sockets on which the memory pools used by the
 * ports are allocated.
uint8_t port_numa[RTE_MAX_ETHPORTS];
 * Store the specified sockets on which the RX rings used by the
 * ports are allocated.
uint8_t rxring_numa[RTE_MAX_ETHPORTS];
 * Store the specified sockets on which the TX rings used by the
 * ports are allocated.
uint8_t txring_numa[RTE_MAX_ETHPORTS];
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
 * Probed Target Environment.
struct rte_port *ports; /**< For all probed ethernet ports. */
portid_t nb_ports; /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores; /**< Number of probed logical cores. */
portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t nb_cfg_ports; /**< Number of configured ports. */
portid_t nb_fwd_ports; /**< Number of forwarding ports. */
unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
 * Forwarding engines.
struct fwd_engine *fwd_engines[] = {
#if defined RTE_LIBRTE_PMD_SOFTNIC
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
				     * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated. Set a flag to exit the stats-period loop once
 * SIGINT/SIGTERM has been received.
 * Configuration of packet segments used by the "txonly" processing engine.
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;
/* Whether DCB is in testing status */
uint8_t dcb_test = 0;
 * Configurable number of RX/TX queues.
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
#define RTE_PMD_PARAM_UNSET -1
 * Configurable values of RX and TX ring threshold registers.
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
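/* A threshold left at RTE_PMD_PARAM_UNSET keeps the driver's default value. */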
 * Configurable value of RX free threshold.
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
 * Configurable value of RX drop enable.
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
 * Configurable value of TX free threshold.
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
 * Configurable value of TX RS bit threshold.
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
 * Configurable value of buffered packets before sending.
uint16_t noisy_tx_sw_bufsz;
 * Configurable value of packet buffer timeout.
uint16_t noisy_tx_sw_buf_flush_time;
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour
uint64_t noisy_lkup_mem_sz;
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
uint64_t noisy_lkup_num_writes;
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
uint64_t noisy_lkup_num_reads;
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
uint64_t noisy_lkup_num_reads_writes;
 * Receive Side Scaling (RSS) configuration.
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
 * Port topology configuration
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
 * Avoid flushing all the RX streams before starting forwarding.
uint8_t no_flush_rx = 0; /* flush by default */
 * Flow API isolated mode.
uint8_t flow_isolate_all;
 * Avoid checking link status when starting/stopping a port.
uint8_t no_link_check = 0; /* check by default */
 * Enable link status change notification.
uint8_t lsc_interrupt = 1; /* enabled by default */
 * Enable device removal notification.
uint8_t rmv_interrupt = 1; /* enabled by default */
uint8_t hot_plug = 0; /**< hotplug disabled by default. */
/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;
/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_MAX] = NULL,
 * Ethernet device events to display or mask.
 * Default: all events except VF_MBOX.
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
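/*
 * An event of type t is printed iff event_print_mask has the bit
 * (UINT32_C(1) << t) set; see eth_event_callback() below.
 */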
 * Decide if all memory is locked for performance.
 * NIC bypass mode configuration options.
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#ifdef RTE_LIBRTE_LATENCY_STATS
 * Set when latency statistics are enabled on the command line.
uint8_t latencystats_enabled;
 * Lcore ID to serve latency statistics.
lcoreid_t latencystats_lcore_id = -1;
 * Ethernet device configuration.
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.vlan_tci_mask = 0xFFEF,
	.src_ip = 0xFFFFFFFF,
	.dst_ip = 0xFFFFFFFF,
	.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	.src_port_mask = 0xFFFF,
	.dst_port_mask = 0xFFFF,
	.mac_addr_byte_mask = 0xFF,
	.tunnel_type_mask = 1,
	.tunnel_id_mask = 0xFFFFFFFF,
volatile int test_done = 1; /* stop packet forwarding when set to 1. */
struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;
 * Display zero values by default for xstats
uint8_t xstats_hide_zero;
unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];
#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
struct vxlan_encap_conf vxlan_encap_conf = {
	.vni = "\x00\x00\x00",
	.udp_dst = RTE_BE16(4789),
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		    "\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		    "\x00\x00\x00\x00\x00\x00\x11\x11",
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
struct nvgre_encap_conf nvgre_encap_conf = {
	.tni = "\x00\x00\x00",
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		    "\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		    "\x00\x00\x00\x00\x00\x00\x11\x11",
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
static int all_ports_started(void);
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
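/* With the standard Ethernet maximum frame length this is 1518 - 4 = 1514 bytes. */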
 * Helper function to check if a socket is new (not yet discovered).
 * If the socket is new, return a positive value; otherwise return zero.
new_socket_id(unsigned int socket_id)
	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
 * Set up the default configuration.
set_default_fwd_lcores_config(void)
	unsigned int sock_num;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
			socket_ids[num_sockets++] = sock_num;
		if (i == rte_get_master_lcore())
		fwd_lcores_cpuids[nb_lc++] = i;
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
set_def_peer_eth_addrs(void)
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
set_default_fwd_ports_config(void)
	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;
		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
			socket_ids[num_sockets++] = socket_id;
	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
set_def_fwd_config(void)
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
/* extremely pessimistic estimation of memory required to create a mempool */
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;
	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	uint64_t hdr_mem = 128 << 20;
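	/*
	 * Illustrative example (numbers are not normative): with
	 * mbuf_sz = 2176, rte_mempool_calc_obj_size() may round obj_sz up to,
	 * say, 2432 bytes. On 2 MiB pages that is 2097152 / 2432 = 862 mbufs
	 * per page, so 180224 mbufs need ceil(180224 / 862) = 210 pages
	 * (~420 MiB), plus the pessimistic 128 MiB header allowance above,
	 * aligned up to the page size.
	 */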
	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
	mbuf_mem = n_pages * pgsz;
	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
	*out = (size_t)total_mem;
pagesz_flags(uint64_t page_sz)
	/* per the mmap() manpage, huge page sizes are encoded as
	 * log2(page size) shifted left by MAP_HUGE_SHIFT
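	 * For 2 MiB pages, for example, rte_log2_u64() returns 21, which
	 * matches Linux's MAP_HUGE_2MB encoding (21 << MAP_HUGE_SHIFT).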
	int log2 = rte_log2_u64(page_sz);
	return (log2 << HUGE_SHIFT);
alloc_mem(size_t memsz, size_t pgsz, bool huge)
	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
		flags |= HUGE_FLAG | pagesz_flags(pgsz);
	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
struct extmem_param {
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			      RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
		cur_pgsz = pgsizes[pgsz_idx];
		/* if we were told not to allocate hugepages, override */
			cur_pgsz = sysconf(_SC_PAGESIZE);
		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);
		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;
		iovas = malloc(sizeof(*iovas) * n_pages);
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
		/* lock memory if it's not huge pages */
		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);
			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;
			iova = rte_mem_virt2iova(cur);
			iovas[cur_page] = iova;
	/* if we couldn't allocate anything */
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;
	munmap(addr, mem_sz);
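/*
 * Set up an external memory heap: create the named heap on first use,
 * build an extmem area with create_extmem(), then hand the area and its
 * IOVA table to the heap so that mempools can later be created from it.
 */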
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
	struct extmem_param param;
	memset(&param, 0, sizeof(param));
	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
			TESTPMD_LOG(ERR, "Cannot create heap\n");
	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);
	/* when using VFIO, memory is automatically mapped for DMA by EAL */
	/* not needed any more */
	free(param.iova_table);
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
 * Configuration initialisation done once at init time.
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
			    rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned int) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	case MP_ALLOC_XMEM_HUGE:
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
			rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				    rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
		rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			 "Creation of mbuf pool for socket %u failed: %s\n",
			 socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
 * Check whether the given socket ID is valid in NUMA mode;
 * if valid, return 0, else return -1.
check_socket_id(const unsigned int socket_id)
	static int warning_once = 0;
	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
 * Get the allowed maximum number of RX queues.
 * *pid returns the port ID that has the smallest value of
 * max_rx_queues among all ports.
get_allowed_max_nb_rxq(portid_t *pid)
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	struct rte_eth_dev_info dev_info;
	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
	return allowed_max_rxq;
 * Check whether the input rxq is valid.
 * The input rxq is valid if it does not exceed the maximum number of
 * RX queues of every port.
 * If valid, return 0, else return -1.
check_nb_rxq(queueid_t rxq)
	queueid_t allowed_max_rxq;
	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
 * Get the allowed maximum number of TX queues.
 * *pid returns the port ID that has the smallest value of
 * max_tx_queues among all ports.
get_allowed_max_nb_txq(portid_t *pid)
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	struct rte_eth_dev_info dev_info;
	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
	return allowed_max_txq;
 * Check whether the input txq is valid.
 * The input txq is valid if it does not exceed the maximum number of
 * TX queues of every port.
 * If valid, return 0, else return -1.
check_nb_txq(queueid_t txq)
	queueid_t allowed_max_txq;
	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	RTE_ETH_FOREACH_DEV(pid) {
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MATCH_METADATA))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MATCH_METADATA;
		if (port_numa[pid] != NUMA_NO_CONFIG)
			port_per_socket[port_numa[pid]]++;
			uint32_t socket_id = rte_eth_dev_socket_id(pid);
			 * if socket_id is invalid,
			 * set to the first available socket.
			if (check_socket_id(socket_id) < 0)
				socket_id = socket_ids[0];
			port_per_socket[socket_id]++;
		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;
		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;
 * Create pools of mbufs.
 * If NUMA support is disabled, create a single pool of mbufs in
 * socket 0 memory by default.
 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
 * nb_txd can be configured at run time.
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
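		/*
		 * Illustrative sizing (a sketch, assuming RX/TX descriptor
		 * maxima of 2048 each, 8 lcores, the default 250-mbuf cache
		 * and a 512-packet maximum burst):
		 * 2048 + 8 * 250 + 2048 + 512 = 6608 mbufs, then scaled by
		 * RTE_MAX_ETHPORTS.
		 */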
		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	 * Records which mbuf pool is used by each logical core, if needed.
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
				 "rte_gro_ctx_create() failed\n");
#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			const char *driver = port->dev_info.driver_name;
			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
reconfig(portid_t new_port_id, unsigned socket_id)
	struct rte_port *port;
	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);
	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
init_fwd_streams(void)
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
				port->socket_id = rte_eth_dev_socket_id(pid);
				 * if socket_id is invalid,
				 * set to the first available socket.
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
				port->socket_id = socket_num;
	q = RTE_MAX(nb_rxq, nb_txq);
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
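	/* For example, 2 ports with nb_rxq = 4 and nb_txq = 2 give q = 4 and
	 * hence 8 forwarding streams. */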
	if (nb_fwd_streams_new == nb_fwd_streams)
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		rte_free(fwd_streams);
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	int burst_percent[3];
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
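	 * For example (illustrative numbers): if 890 of 1024 bursts carried
	 * 32 packets and 100 carried 16 packets, the loop below ends with
	 * burst_stats[0]/pktnb_stats[0] = 890/32 and
	 * burst_stats[1]/pktnb_stats[1] = 100/16.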
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
	if (total_burst == 0)
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
fwd_stream_stats_display(streamid_t stream_id)
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";
	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
	printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
	       " TX-dropped: %-14"PRIu64,
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf(" RX-bad IP checksum: %-14"PRIu64
		       " RX-bad L4 checksum: %-14"PRIu64
		       " RX-bad outer L4 checksum: %-14"PRIu64"\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
fwd_stats_display(void)
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles = 0;
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	memset(ports_stats, 0, sizeof(ports_stats));
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles += fs->core_cycles;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf += stats.rx_nombuf;
		printf("\n %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);
		if (!port->rx_queue_stats_mapping_enabled &&
		    !port->tx_queue_stats_mapping_enabled) {
			printf(" RX-packets: %-14"PRIu64
			       " RX-dropped: %-14"PRIu64
			       "RX-total: %-"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);
			if (cur_fwd_eng == &csum_fwd_engine)
				printf(" Bad-ipcsum: %-14"PRIu64
				       " Bad-l4csum: %-14"PRIu64
				       "Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if (stats.ierrors + stats.rx_nombuf > 0) {
				printf(" RX-error: %-"PRIu64"\n",
				printf(" RX-nombufs: %-14"PRIu64"\n",
			printf(" TX-packets: %-14"PRIu64
			       " TX-dropped: %-14"PRIu64
			       "TX-total: %-"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
			printf(" RX-packets: %14"PRIu64
			       " RX-dropped:%14"PRIu64
			       " RX-total:%14"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);
			if (cur_fwd_eng == &csum_fwd_engine)
				printf(" Bad-ipcsum:%14"PRIu64
				       " Bad-l4csum:%14"PRIu64
				       " Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if ((stats.ierrors + stats.rx_nombuf) > 0) {
				printf(" RX-error:%"PRIu64"\n", stats.ierrors);
				printf(" RX-nombufs: %14"PRIu64"\n",
			printf(" TX-packets: %14"PRIu64
			       " TX-dropped:%14"PRIu64
			       " TX-total:%14"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		if (ports_stats[pt_id].rx_stream)
			pkt_burst_stats_display("RX",
				&ports_stats[pt_id].rx_stream->rx_burst_stats);
		if (ports_stats[pt_id].tx_stream)
			pkt_burst_stats_display("TX",
				&ports_stats[pt_id].tx_stream->tx_burst_stats);
		if (port->rx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf(" Stats reg %2d RX-packets:%14"PRIu64
				       " RX-errors:%14"PRIu64
				       " RX-bytes:%14"PRIu64"\n",
				       j, stats.q_ipackets[j],
				       stats.q_errors[j], stats.q_ibytes[j]);
		if (port->tx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf(" Stats reg %2d TX-packets:%14"PRIu64
				       j, stats.q_opackets[j],
		printf(" %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	printf("\n %s Accumulated forward statistics for all ports"
	       acc_stats_border, acc_stats_border);
	printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
fwd_stats_reset(void)
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];
		fs->fwd_dropped = 0;
		fs->rx_bad_ip_csum = 0;
		fs->rx_bad_l4_csum = 0;
		fs->rx_bad_outer_l4_csum = 0;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fs->core_cycles = 0;
flush_fwd_rx_queues(void)
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;
	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */
	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				 * testpmd can get stuck in the do/while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer is added to exit
				 * this loop once the 1-second timer expires.
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
	struct fwd_stream **fsm;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;
	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
		    bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
				tics_datum = tics_current;
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
		    latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
	} while (!fc->stopped);
start_pkt_forward_on_core(void *fwd_arg)
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
run_one_txonly_burst_on_core(void *fwd_arg)
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;
	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
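	/* With stopped pre-set, the do/while loop in run_pkt_fwd_on_lcore()
	 * executes its body exactly once, i.e. a single TXONLY burst. */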
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
 * Launch packet forwarding:
 *     - Set up per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
	port_fwd_begin_t port_fwd_begin;
	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
				printf("launch lcore %u failed - diag=%d\n",
 * Launch packet forwarding configuration.
start_packet_forwarding(int with_tx_first)
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq is 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);
	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
	flush_fwd_rx_queues();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
	launch_packet_forwarding(start_pkt_forward_on_core);
stop_packet_forwarding(void)
	port_fwd_end_t port_fwd_end;
		printf("Packet forwarding not started\n");
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
	fwd_stats_display();
	printf("\nDone.\n");
dev_set_link_up(portid_t pid)
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nFailed to set link up.\n");
dev_set_link_down(portid_t pid)
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nFailed to set link down.\n");
all_ports_started(void)
	struct rte_port *port;
	RTE_ETH_FOREACH_DEV(pi) {
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
	/* No unstarted port found; all ports are started */
port_is_stopped(portid_t port_id)
	struct rte_port *port = &ports[port_id];
	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
all_ports_stopped(void)
	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
port_is_started(portid_t port_id)
	if (port_id_is_invalid(port_id, ENABLED_WARN))
	if (ports[port_id].port_status != RTE_PORT_STARTED)
start_port(portid_t pid)
	int diag, need_check_link_status = -1;
	struct rte_port *port;
	struct ether_addr mac_addr;
	if (port_id_is_invalid(pid, ENABLED_WARN))
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
		need_check_link_status = 0;
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;
			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
				if (rte_atomic16_cmpset(&(port->port_status),
				    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
					       "to stopped\n", pi);
				printf("Failed to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						&(port->tx_conf[qi]));
				/* Failed to set up tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
					       "to stopped\n", pi);
				printf("Failed to configure port %d tx queues\n",
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
						printf("Failed to set up RX queue: "
						       "no mempool allocation "
						       "on socket %d\n",
					diag = rte_eth_rx_queue_setup(pi, qi,
						port->nb_rx_desc[qi],
						&(port->rx_conf[qi]),
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
						printf("Failed to set up RX queue: "
						       "no mempool allocation "
						       "on socket %d\n",
					diag = rte_eth_rx_queue_setup(pi, qi,
						port->nb_rx_desc[qi],
						&(port->rx_conf[qi]),
				/* Failed to set up rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
					       "to stopped\n", pi);
				printf("Failed to configure port %d rx queues\n",
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
		configure_rxtx_dump_callbacks(verbose_level);
		if (rte_eth_dev_start(pi) < 0) {
			printf("Failed to start port %d\n", pi);
			/* Failed to start the port, roll back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
			    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
		if (rte_atomic16_cmpset(&(port->port_status),
		    RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set to started\n", pi);
		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
		       mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
		/* at least one port started, need to check link status */
		need_check_link_status = 1;
	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");
stop_port(portid_t pid)
	struct rte_port *port;
	int need_check_link_status = 0;
	if (port_id_is_invalid(pid, ENABLED_WARN))
	printf("Stopping ports...\n");
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
		rte_eth_dev_stop(pi);
		if (rte_atomic16_cmpset(&(port->port_status),
		    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set to stopped\n", pi);
		need_check_link_status = 1;
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
remove_invalid_ports_in(portid_t *array, portid_t *total)
	portid_t new_total = 0;
	for (i = 0; i < *total; i++)
		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
			array[new_total] = array[i];
remove_invalid_ports(void)
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
close_port(portid_t pid)
	struct rte_port *port;
	if (port_id_is_invalid(pid, ENABLED_WARN))
	printf("Closing ports...\n");
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
		if (rte_atomic16_cmpset(&(port->port_status),
		    RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
		if (rte_atomic16_cmpset(&(port->port_status),
		    RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);
	remove_invalid_ports();
	if (rte_atomic16_cmpset(&(port->port_status),
	    RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
		printf("Port %d cannot be set to closed\n", pi);
reset_port(portid_t pid)
	struct rte_port *port;
	if (port_id_is_invalid(pid, ENABLED_WARN))
	printf("Resetting ports...\n");
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
		diag = rte_eth_dev_reset(pi);
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
attach_port(char *identifier)
	struct rte_dev_iterator iterator;
	printf("Attaching a new port...\n");
	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
	if (rte_dev_probe(identifier) != 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
	/* first attach mode: event */
	if (setup_on_probe_event) {
		/* new ports are detected on RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
			    ports[pi].need_setup != 0)
				setup_attached_port(pi);
	/* second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* setup ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
setup_attached_port(portid_t pi)
	unsigned int socket_id;
	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);
	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;
	printf("Port %d is attached. The total number of ports is now %d\n", pi, nb_ports);
detach_port_device(portid_t port_id)
	struct rte_device *dev;
	printf("Removing a device...\n");
	dev = rte_eth_devices[port_id].device;
		printf("Device already removed\n");
	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			printf("Port not stopped\n");
		printf("Port was not closed\n");
		if (ports[port_id].flow_list)
			port_flow_flush(port_id);
	if (rte_dev_remove(dev) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
	for (sibling = 0; sibling < RTE_MAX_ETHPORTS; sibling++) {
		if (rte_eth_devices[sibling].device != dev)
		/* reset mapping between old ports and removed device */
		rte_eth_devices[sibling].device = NULL;
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			/* sibling ports are forced to be closed */
			ports[sibling].port_status = RTE_PORT_CLOSED;
			printf("Port %u is closed\n", sibling);
	remove_invalid_ports();
	printf("Device of port %u is detached\n", port_id);
	printf("The total number of ports is now %d\n", nb_ports);
	struct rte_device *device;
	stop_packet_forwarding();
	if (ports != NULL) {
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			 * This is a workaround for a virtio-user issue that
			 * requires calling a clean-up routine to remove existing
			 * This workaround is valid only for testpmd; a fix
			 * valid for all applications is still needed.
			 * TODO: Implement proper resource cleanup
			device = rte_eth_devices[pt_id].device;
			if (device && !strcmp(device->driver->name, "net_virtio_user"))
				detach_port_device(pt_id);
	ret = rte_dev_event_monitor_stop();
			"failed to stop device event monitor.");
	ret = rte_dev_event_callback_unregister(NULL,
						dev_event_callback, NULL);
			"failed to unregister device event callback.\n");
	ret = rte_dev_hotplug_handle_disable();
			"failed to disable hotplug handling.\n");
	printf("\nBye...\n");
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2461 /* Check the link status of all ports for up to 9 seconds, then print the final status of each */
2463 check_all_ports_link_status(uint32_t port_mask)
2465 #define CHECK_INTERVAL 100 /* 100ms */
2466 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2468 uint8_t count, all_ports_up, print_flag = 0;
2469 struct rte_eth_link link;
2471 printf("Checking link statuses...\n");
2473 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2475 RTE_ETH_FOREACH_DEV(portid) {
2476 if ((port_mask & (1 << portid)) == 0)
2477 continue;
2478 memset(&link, 0, sizeof(link));
2479 rte_eth_link_get_nowait(portid, &link);
2480 /* print link status if flag set */
2481 if (print_flag == 1) {
2482 if (link.link_status)
2484 "Port%d Link Up. speed %u Mbps- %s\n",
2485 portid, link.link_speed,
2486 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2487 ("full-duplex") : ("half-duplex\n"));
2489 printf("Port %d Link Down\n", portid);
2492 /* clear all_ports_up flag if any link down */
2493 if (link.link_status == ETH_LINK_DOWN) {
2498 /* after finally printing all link status, get out */
2499 if (print_flag == 1)
2502 if (all_ports_up == 0) {
2504 rte_delay_ms(CHECK_INTERVAL);
2507 /* set the print_flag if all ports up or timeout */
2508 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
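/*
 * Editor's illustrative sketch (not part of the original testpmd code):
 * the same poll-until-up pattern for a single port, reusing the
 * CHECK_INTERVAL / MAX_CHECK_TIME budget defined above (100 ms steps,
 * 9 s total).
 */
static int
example_wait_link_up(portid_t pid)
{
	struct rte_eth_link link;
	uint8_t i;

	for (i = 0; i < MAX_CHECK_TIME; i++) {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(pid, &link);
		if (link.link_status == ETH_LINK_UP)
			return 0;
		rte_delay_ms(CHECK_INTERVAL);
	}
	return -1; /* link still down after the timeout */
}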
2518 * This callback removes a single port of a device; it cannot handle
2519 * removing multiple ports of one device.
2520 * TODO: the device-detach invocation is planned to move from the user
2521 * side into the EAL; all PMDs should then free port resources on ethdev close.
2524 rmv_port_callback(void *arg)
2526 int need_to_start = 0;
2527 int org_no_link_check = no_link_check;
2528 portid_t port_id = (intptr_t)arg;
2530 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2532 if (!test_done && port_is_forwarding(port_id)) {
2533 need_to_start = 1;
2534 stop_packet_forwarding();
2536 no_link_check = 1;
2537 stop_port(port_id);
2538 no_link_check = org_no_link_check;
2539 close_port(port_id);
2540 detach_port_device(port_id);
2541 if (need_to_start)
2542 start_packet_forwarding(0);
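/*
 * Editor's illustrative sketch (not part of the original testpmd code):
 * deferring work out of interrupt context with rte_eal_alarm_set(), the
 * same mechanism used to defer rmv_port_callback() elsewhere in this
 * file. "example_deferred" is a hypothetical callback; 100000 us
 * mirrors the delay testpmd uses.
 */
static void
example_deferred(void *arg)
{
	printf("deferred removal of port %d\n", (int)(intptr_t)arg);
}

static int
example_schedule_removal(portid_t port_id)
{
	/* example_deferred() runs in the alarm thread ~100 ms later */
	return rte_eal_alarm_set(100000, example_deferred,
				 (void *)(intptr_t)port_id);
}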
2545 /* This function is used by the interrupt thread */
2547 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2550 RTE_SET_USED(param);
2551 RTE_SET_USED(ret_param);
2553 if (type >= RTE_ETH_EVENT_MAX) {
2554 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2555 port_id, __func__, type);
2557 } else if (event_print_mask & (UINT32_C(1) << type)) {
2558 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2559 eth_event_desc[type]);
2564 case RTE_ETH_EVENT_NEW:
2565 ports[port_id].need_setup = 1;
2566 ports[port_id].port_status = RTE_PORT_HANDLING;
2568 case RTE_ETH_EVENT_INTR_RMV:
2569 if (port_id_is_invalid(port_id, DISABLED_WARN))
2571 if (rte_eal_alarm_set(100000,
2572 rmv_port_callback, (void *)(intptr_t)port_id))
2573 fprintf(stderr, "Could not set up deferred device removal\n");
2582 register_eth_event_callback(void)
2585 enum rte_eth_event_type event;
2587 for (event = RTE_ETH_EVENT_UNKNOWN;
2588 event < RTE_ETH_EVENT_MAX; event++) {
2589 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
2594 TESTPMD_LOG(ERR, "Failed to register callback for "
2595 "%s event\n", eth_event_desc[event]);
2603 /* This function is used by the interrupt thread */
2605 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2606 __rte_unused void *arg)
2611 if (type >= RTE_DEV_EVENT_MAX) {
2612 fprintf(stderr, "%s called upon invalid event %d\n",
2618 case RTE_DEV_EVENT_REMOVE:
2619 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
2621 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2623 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
2628 * Because the user's callback is invoked from the EAL interrupt
2629 * callback, the interrupt callback must finish before it can be
2630 * unregistered when detaching the device. So return from this
2631 * callback quickly and detach the device via a deferred removal.
2632 * This is a workaround; once device detaching is moved into the
2633 * EAL, the deferred removal can be deleted.
2636 if (rte_eal_alarm_set(100000,
2637 rmv_port_callback, (void *)(intptr_t)port_id))
2639 "Could not set up deferred device removal\n");
2641 case RTE_DEV_EVENT_ADD:
2642 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2644 /* TODO: after the kernel driver binding finishes,
2645 * begin to attach the port.
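/*
 * Editor's illustrative sketch (not part of the original testpmd code):
 * the enable-side counterpart of the handler above, matching the
 * sequence main() performs further below -- enable hotplug handling,
 * start the monitor, then register for events on all devices (NULL
 * device name).
 */
static int
example_hotplug_setup(void)
{
	if (rte_dev_hotplug_handle_enable() < 0)
		return -1;
	if (rte_dev_event_monitor_start() < 0)
		return -1;
	return rte_dev_event_callback_register(NULL, dev_event_callback,
					       NULL);
}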
2654 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2658 uint8_t mapping_found = 0;
2660 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2661 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2662 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2663 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2664 tx_queue_stats_mappings[i].queue_id,
2665 tx_queue_stats_mappings[i].stats_counter_id);
2672 port->tx_queue_stats_mapping_enabled = 1;
2677 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2681 uint8_t mapping_found = 0;
2683 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2684 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2685 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2686 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2687 rx_queue_stats_mappings[i].queue_id,
2688 rx_queue_stats_mappings[i].stats_counter_id);
2695 port->rx_queue_stats_mapping_enabled = 1;
2700 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2704 diag = set_tx_queue_stats_mapping_registers(pi, port);
2706 if (diag == -ENOTSUP) {
2707 port->tx_queue_stats_mapping_enabled = 0;
2708 printf("TX queue stats mapping not supported port id=%d\n", pi);
2711 rte_exit(EXIT_FAILURE,
2712 "set_tx_queue_stats_mapping_registers "
2713 "failed for port id=%d diag=%d\n",
2714 pi, diag);
2717 diag = set_rx_queue_stats_mapping_registers(pi, port);
2719 if (diag == -ENOTSUP) {
2720 port->rx_queue_stats_mapping_enabled = 0;
2721 printf("RX queue stats mapping not supported port id=%d\n", pi);
2724 rte_exit(EXIT_FAILURE,
2725 "set_rx_queue_stats_mapping_registers "
2726 "failed for port id=%d diag=%d\n",
2727 pi, diag);
2732 rxtx_port_config(struct rte_port *port)
2736 for (qid = 0; qid < nb_rxq; qid++) {
2737 port->rx_conf[qid] = port->dev_info.default_rxconf;
2739 /* Check if any Rx parameters have been passed */
2740 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2741 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2743 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2744 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2746 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2747 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2749 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2750 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2752 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2753 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2755 port->nb_rx_desc[qid] = nb_rxd;
2758 for (qid = 0; qid < nb_txq; qid++) {
2759 port->tx_conf[qid] = port->dev_info.default_txconf;
2761 /* Check if any Tx parameters have been passed */
2762 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2763 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2765 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2766 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2768 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2769 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2771 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2772 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2774 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2775 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2777 port->nb_tx_desc[qid] = nb_txd;
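/*
 * Editor's illustrative sketch (not part of the original testpmd code):
 * how the per-queue configuration prepared by rxtx_port_config() is
 * consumed -- handed to the ethdev queue-setup calls. "mp" is a
 * hypothetical mbuf pool created elsewhere.
 */
static int
example_setup_queue0(portid_t pid, struct rte_port *port,
		     struct rte_mempool *mp)
{
	int ret;

	ret = rte_eth_rx_queue_setup(pid, 0, port->nb_rx_desc[0],
			rte_eth_dev_socket_id(pid), &port->rx_conf[0], mp);
	if (ret < 0)
		return ret;
	return rte_eth_tx_queue_setup(pid, 0, port->nb_tx_desc[0],
			rte_eth_dev_socket_id(pid), &port->tx_conf[0]);
}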
2782 init_port_config(void)
2785 struct rte_port *port;
2787 RTE_ETH_FOREACH_DEV(pid) {
2788 port = &ports[pid];
2789 port->dev_conf.fdir_conf = fdir_conf;
2790 rte_eth_dev_info_get(pid, &port->dev_info);
2791 if (nb_rxq > 1) {
2792 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2793 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2794 rss_hf & port->dev_info.flow_type_rss_offloads;
2795 } else {
2796 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2797 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2798 }
2800 if (port->dcb_flag == 0) {
2801 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2802 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2804 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2807 rxtx_port_config(port);
2809 rte_eth_macaddr_get(pid, &port->eth_addr);
2811 map_port_queue_stats_mapping_registers(pid, port);
2812 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2813 rte_pmd_ixgbe_bypass_init(pid);
2816 if (lsc_interrupt &&
2817 (rte_eth_devices[pid].data->dev_flags &
2818 RTE_ETH_DEV_INTR_LSC))
2819 port->dev_conf.intr_conf.lsc = 1;
2820 if (rmv_interrupt &&
2821 (rte_eth_devices[pid].data->dev_flags &
2822 RTE_ETH_DEV_INTR_RMV))
2823 port->dev_conf.intr_conf.rmv = 1;
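/*
 * Editor's illustrative sketch (not part of the original testpmd code):
 * the RSS decision taken in init_port_config() above as a standalone
 * helper -- enable RSS only when more than one RX queue exists and the
 * device left some hash types enabled.
 */
static void
example_pick_mq_mode(struct rte_port *port, queueid_t nb_rx_queues)
{
	if (nb_rx_queues > 1 &&
	    port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
		port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	else
		port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
}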
2827 void set_port_slave_flag(portid_t slave_pid)
2829 struct rte_port *port;
2831 port = &ports[slave_pid];
2832 port->slave_flag = 1;
2835 void clear_port_slave_flag(portid_t slave_pid)
2837 struct rte_port *port;
2839 port = &ports[slave_pid];
2840 port->slave_flag = 0;
2843 uint8_t port_is_bonding_slave(portid_t slave_pid)
2845 struct rte_port *port;
2847 port = &ports[slave_pid];
2848 if ((rte_eth_devices[slave_pid].data->dev_flags &
2849 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2854 const uint16_t vlan_tags[] = {
2855 0, 1, 2, 3, 4, 5, 6, 7,
2856 8, 9, 10, 11, 12, 13, 14, 15,
2857 16, 17, 18, 19, 20, 21, 22, 23,
2858 24, 25, 26, 27, 28, 29, 30, 31
2862 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2863 enum dcb_mode_enable dcb_mode,
2864 enum rte_eth_nb_tcs num_tcs,
2869 struct rte_eth_rss_conf rss_conf;
2872 * Builds up the correct configuration for DCB+VT based on the VLAN tags array
2873 * given above, and the number of traffic classes available for use.
2875 if (dcb_mode == DCB_VT_ENABLED) {
2876 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2877 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2878 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2879 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2881 /* VMDQ+DCB RX and TX configurations */
2882 vmdq_rx_conf->enable_default_pool = 0;
2883 vmdq_rx_conf->default_pool = 0;
2884 vmdq_rx_conf->nb_queue_pools =
2885 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2886 vmdq_tx_conf->nb_queue_pools =
2887 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2889 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2890 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2891 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2892 vmdq_rx_conf->pool_map[i].pools =
2893 1 << (i % vmdq_rx_conf->nb_queue_pools);
2895 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2896 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2897 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2900 /* set DCB mode of RX and TX of multiple queues */
2901 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2902 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2904 struct rte_eth_dcb_rx_conf *rx_conf =
2905 ð_conf->rx_adv_conf.dcb_rx_conf;
2906 struct rte_eth_dcb_tx_conf *tx_conf =
2907 ð_conf->tx_adv_conf.dcb_tx_conf;
2909 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2913 rx_conf->nb_tcs = num_tcs;
2914 tx_conf->nb_tcs = num_tcs;
2916 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2917 rx_conf->dcb_tc[i] = i % num_tcs;
2918 tx_conf->dcb_tc[i] = i % num_tcs;
2921 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2922 eth_conf->rx_adv_conf.rss_conf = rss_conf;
2923 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2927 eth_conf->dcb_capability_en =
2928 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2930 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
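/*
 * Editor's illustrative sketch (not part of the original testpmd code):
 * using the helper above to build a plain-DCB (no VT) configuration
 * with 4 traffic classes and PFC enabled (the final argument), then
 * applying it. The queue count of 4 (one per TC) is a hypothetical
 * choice.
 */
static int
example_dcb_4tc(portid_t pid)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	ret = get_eth_dcb_conf(pid, &conf, DCB_ENABLED, ETH_4_TCS, 1);
	if (ret != 0)
		return ret;
	return rte_eth_dev_configure(pid, 4, 4, &conf);
}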
2936 init_port_dcb_config(portid_t pid,
2937 enum dcb_mode_enable dcb_mode,
2938 enum rte_eth_nb_tcs num_tcs,
2941 struct rte_eth_conf port_conf;
2942 struct rte_port *rte_port;
2946 rte_port = &ports[pid];
2948 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2949 /* Enter DCB configuration status */
2952 port_conf.rxmode = rte_port->dev_conf.rxmode;
2953 port_conf.txmode = rte_port->dev_conf.txmode;
2955 /* set configuration of DCB in VT mode and DCB in non-VT mode */
2956 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2959 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2961 /* re-configure the device */
2962 rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
2964 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2966 /* If dev_info.vmdq_pool_base is greater than 0,
2967 * the queue IDs of the VMDq pools start after the PF queues.
2969 if (dcb_mode == DCB_VT_ENABLED &&
2970 rte_port->dev_info.vmdq_pool_base > 0) {
2971 printf("VMDQ_DCB multi-queue mode is nonsensical"
2972 " for port %d.", pid);
2976 /* Assume all ports in testpmd have the same DCB capability
2977 * and the same number of rxq and txq in DCB mode
2979 if (dcb_mode == DCB_VT_ENABLED) {
2980 if (rte_port->dev_info.max_vfs > 0) {
2981 nb_rxq = rte_port->dev_info.nb_rx_queues;
2982 nb_txq = rte_port->dev_info.nb_tx_queues;
2984 nb_rxq = rte_port->dev_info.max_rx_queues;
2985 nb_txq = rte_port->dev_info.max_tx_queues;
2988 /* if VT is disabled, use all PF queues */
2989 if (rte_port->dev_info.vmdq_pool_base == 0) {
2990 nb_rxq = rte_port->dev_info.max_rx_queues;
2991 nb_txq = rte_port->dev_info.max_tx_queues;
2993 nb_rxq = (queueid_t)num_tcs;
2994 nb_txq = (queueid_t)num_tcs;
2998 rx_free_thresh = 64;
3000 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3002 rxtx_port_config(rte_port);
3004 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3005 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3006 rx_vft_set(pid, vlan_tags[i], 1);
3008 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
3009 map_port_queue_stats_mapping_registers(pid, rte_port);
3011 rte_port->dcb_flag = 1;
3019 /* Configuration of Ethernet ports. */
3020 ports = rte_zmalloc("testpmd: ports",
3021 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3022 RTE_CACHE_LINE_SIZE);
3023 if (ports == NULL) {
3024 rte_exit(EXIT_FAILURE,
3025 "rte_zmalloc(%d struct rte_port) failed\n",
3029 /* Initialize ports NUMA structures */
3030 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3031 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3032 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3046 const char clr[] = { 27, '[', '2', 'J', '\0' };
3047 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3049 /* Clear screen and move to top left */
3050 printf("%s%s", clr, top_left);
3052 printf("\nPort statistics ====================================");
3053 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3054 nic_stats_display(fwd_ports_ids[i]);
3060 signal_handler(int signum)
3062 if (signum == SIGINT || signum == SIGTERM) {
3063 printf("\nSignal %d received, preparing to exit...\n",
3065 #ifdef RTE_LIBRTE_PDUMP
3066 /* uninitialize packet capture framework */
3069 #ifdef RTE_LIBRTE_LATENCY_STATS
3070 rte_latencystats_uninit();
3073 /* Set flag to indicate forced termination. */
3074 f_quit = 1;
3075 /* exit with the expected status */
3076 signal(signum, SIG_DFL);
3077 kill(getpid(), signum);
3082 main(int argc, char** argv)
3089 signal(SIGINT, signal_handler);
3090 signal(SIGTERM, signal_handler);
3092 diag = rte_eal_init(argc, argv);
3094 rte_panic("Cannot init EAL\n");
3096 testpmd_logtype = rte_log_register("testpmd");
3097 if (testpmd_logtype < 0)
3098 rte_panic("Cannot register log type");
3099 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3101 ret = register_eth_event_callback();
3103 rte_panic("Cannot register for ethdev events");
3105 #ifdef RTE_LIBRTE_PDUMP
3106 /* initialize packet capture framework */
3111 RTE_ETH_FOREACH_DEV(port_id) {
3112 ports_ids[count] = port_id;
3113 count++;
3114 }
3115 nb_ports = (portid_t) count;
3117 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3119 /* allocate port structures, and init them */
3122 set_def_fwd_config();
3124 rte_panic("Empty set of forwarding logical cores - check the "
3125 "core mask supplied in the command parameters\n");
3127 /* Bitrate/latency stats disabled by default */
3128 #ifdef RTE_LIBRTE_BITRATE
3129 bitrate_enabled = 0;
3131 #ifdef RTE_LIBRTE_LATENCY_STATS
3132 latencystats_enabled = 0;
3135 /* on FreeBSD, mlockall() is disabled by default */
3136 #ifdef RTE_EXEC_ENV_FREEBSD
3145 launch_args_parse(argc, argv);
3147 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3148 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3152 if (tx_first && interactive)
3153 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3154 "interactive mode.\n");
3156 if (tx_first && lsc_interrupt) {
3157 printf("Warning: lsc_interrupt needs to be off when "
3158 " using tx_first. Disabling.\n");
3162 if (!nb_rxq && !nb_txq)
3163 printf("Warning: Either rx or tx queues should be non-zero\n");
3165 if (nb_rxq > 1 && nb_rxq > nb_txq)
3166 printf("Warning: nb_rxq=%d enables RSS configuration, "
3167 "but nb_txq=%d will prevent to fully test it.\n",
3173 ret = rte_dev_hotplug_handle_enable();
3176 "fail to enable hotplug handling.");
3180 ret = rte_dev_event_monitor_start();
3183 "fail to start device event monitoring.");
3187 ret = rte_dev_event_callback_register(NULL,
3188 dev_event_callback, NULL);
3191 "fail to register device event callback\n");
3196 if (start_port(RTE_PORT_ALL) != 0)
3197 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3199 /* set all ports to promiscuous mode by default */
3200 RTE_ETH_FOREACH_DEV(port_id)
3201 rte_eth_promiscuous_enable(port_id);
3203 /* Init metrics library */
3204 rte_metrics_init(rte_socket_id());
3206 #ifdef RTE_LIBRTE_LATENCY_STATS
3207 if (latencystats_enabled != 0) {
3208 int ret = rte_latencystats_init(1, NULL);
3210 printf("Warning: latencystats init()"
3211 " returned error %d\n", ret);
3212 printf("Latencystats running on lcore %d\n",
3213 latencystats_lcore_id);
3217 /* Setup bitrate stats */
3218 #ifdef RTE_LIBRTE_BITRATE
3219 if (bitrate_enabled != 0) {
3220 bitrate_data = rte_stats_bitrate_create();
3221 if (bitrate_data == NULL)
3222 rte_exit(EXIT_FAILURE,
3223 "Could not allocate bitrate data.\n");
3224 rte_stats_bitrate_reg(bitrate_data);
3228 #ifdef RTE_LIBRTE_CMDLINE
3229 if (strlen(cmdline_filename) != 0)
3230 cmdline_read_from_file(cmdline_filename);
3232 if (interactive == 1) {
3234 printf("Start automatic packet forwarding\n");
3235 start_packet_forwarding(0);
3247 printf("No commandline core given, start packet forwarding\n");
3248 start_packet_forwarding(tx_first);
3249 if (stats_period != 0) {
3250 uint64_t prev_time = 0, cur_time, diff_time = 0;
3251 uint64_t timer_period;
3253 /* Convert to number of cycles */
3254 timer_period = stats_period * rte_get_timer_hz();
3256 while (f_quit == 0) {
3257 cur_time = rte_get_timer_cycles();
3258 diff_time += cur_time - prev_time;
3260 if (diff_time >= timer_period) {
3261 print_stats();
3262 /* Reset the timer */
3263 diff_time = 0;
3264 }
3265 /* Sleep to avoid unnecessary checks */
3266 prev_time = cur_time;
3267 sleep(1);
3271 printf("Press enter to exit\n");
3272 rc = read(0, &c, 1);
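/*
 * Editor's illustrative sketch (not part of the original testpmd code):
 * the cycle-based periodic timer used by the statistics loop above,
 * factored into a helper. "cb" is a hypothetical callback invoked every
 * "period_s" seconds until *quit becomes non-zero.
 */
static void
example_periodic(volatile uint8_t *quit, unsigned int period_s,
		 void (*cb)(void))
{
	uint64_t period = (uint64_t)period_s * rte_get_timer_hz();
	uint64_t prev = rte_get_timer_cycles();

	while (*quit == 0) {
		uint64_t now = rte_get_timer_cycles();

		if (now - prev >= period) {
			cb();
			prev = now;
		}
		rte_delay_ms(50); /* avoid busy-spinning */
	}
}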