1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc_heap.h>
31 #include <rte_memory.h>
32 #include <rte_memcpy.h>
33 #include <rte_launch.h>
35 #include <rte_alarm.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_atomic.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
50 #ifdef RTE_LIBRTE_IXGBE_PMD
51 #include <rte_pmd_ixgbe.h>
53 #ifdef RTE_LIBRTE_PDUMP
54 #include <rte_pdump.h>
57 #include <rte_metrics.h>
58 #ifdef RTE_LIBRTE_BITRATE
59 #include <rte_bitrate.h>
61 #ifdef RTE_LIBRTE_LATENCY_STATS
62 #include <rte_latencystats.h>
68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69 #define HUGE_FLAG (0x40000)
71 #define HUGE_FLAG MAP_HUGETLB
74 #ifndef MAP_HUGE_SHIFT
75 /* older kernels (or FreeBSD) will not have this define */
76 #define HUGE_SHIFT (26)
78 #define HUGE_SHIFT MAP_HUGE_SHIFT
81 #define EXTMEM_HEAP_NAME "extmem"
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* use the master core for the command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, the NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports among the CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
102 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
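/*
 * Illustrative note: the allocation type is normally chosen on the testpmd
 * command line, e.g. (an assumed invocation, not taken from this source):
 *
 *   testpmd -l 0-3 -n 4 -- --mp-alloc=anon -i
 *
 * which would set mp_alloc_type to MP_ALLOC_ANON before the mbuf pools
 * are created.
 */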
117 * Store the specified sockets on which the memory pool used by each port is allocated.
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123 * Store the specified sockets on which the RX ring used by each port is allocated.
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129 * Store the specified sockets on which the TX ring used by each port is allocated.
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet addresses of the peer target ports to which packets are forwarded.
137 * Must be instantiated with the Ethernet addresses of the peer traffic generator ports.
140 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
170 * Forwarding engines.
172 struct fwd_engine *fwd_engines[] = {
182 #if defined RTE_LIBRTE_PMD_SOFTNIC
185 #ifdef RTE_LIBRTE_IEEE1588
186 &ieee1588_fwd_engine,
191 struct fwd_config cur_fwd_config;
192 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193 uint32_t retry_enabled;
194 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
197 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
199 * specified on command-line. */
200 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
203 * In a container, a process running with the 'stats-period' option cannot otherwise
204 * be terminated, so set a flag to exit the stats-period loop once SIGINT/SIGTERM is received.
209 * Configuration of packet segments used by the "txonly" processing engine.
211 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213 TXONLY_DEF_PACKET_LEN,
215 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
217 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
218 /**< Split policy for packets to TX. */
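/*
 * Illustrative example (using the standard testpmd runtime command):
 * "set txpkts 64,128" sets tx_pkt_nb_segs = 2, tx_pkt_seg_lengths =
 * {64, 128} and tx_pkt_length = 192, so TXONLY sends two-segment packets.
 */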
220 uint8_t txonly_multi_flow;
221 /**< Whether multiple flows are generated in TXONLY mode. */
223 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
224 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
226 /* whether the current configuration is in DCB mode; 0 means it is not */
227 uint8_t dcb_config = 0;
229 /* Whether DCB is in testing status */
230 uint8_t dcb_test = 0;
233 * Configurable number of RX/TX queues.
235 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
236 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
239 * Configurable number of RX/TX ring descriptors.
240 * Defaults are supplied by drivers via ethdev.
242 #define RTE_TEST_RX_DESC_DEFAULT 0
243 #define RTE_TEST_TX_DESC_DEFAULT 0
244 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
245 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
247 #define RTE_PMD_PARAM_UNSET -1
249 * Configurable values of RX and TX ring threshold registers.
252 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
253 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
254 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
256 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
257 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
258 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
261 * Configurable value of RX free threshold.
263 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
266 * Configurable value of RX drop enable.
268 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
271 * Configurable value of TX free threshold.
273 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
276 * Configurable value of TX RS bit threshold.
278 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
281 * Configurable value of buffered packets before sending.
283 uint16_t noisy_tx_sw_bufsz;
286 * Configurable value of packet buffer timeout.
288 uint16_t noisy_tx_sw_buf_flush_time;
291 * Configurable value for size of VNF internal memory area
292 * used for simulating noisy neighbour behaviour
294 uint64_t noisy_lkup_mem_sz;
297 * Configurable value of number of random writes done in
298 * VNF simulation memory area.
300 uint64_t noisy_lkup_num_writes;
303 * Configurable value of number of random reads done in
304 * VNF simulation memory area.
306 uint64_t noisy_lkup_num_reads;
309 * Configurable value of number of random reads/writes done in
310 * VNF simulation memory area.
312 uint64_t noisy_lkup_num_reads_writes;
315 * Receive Side Scaling (RSS) configuration.
317 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
320 * Port topology configuration
322 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
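/*
 * Illustrative example: with four ports in the default paired topology,
 * traffic is forwarded 0 <-> 1 and 2 <-> 3; with --port-topology=chained
 * it would flow 0 -> 1 -> 2 -> 3 -> 0 instead.
 */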
325 * Avoid flushing all the RX streams before starting forwarding.
327 uint8_t no_flush_rx = 0; /* flush by default */
330 * Flow API isolated mode.
332 uint8_t flow_isolate_all;
335 * Avoid checking the link status when starting/stopping a port.
337 uint8_t no_link_check = 0; /* check by default */
340 * Enable link status change notification
342 uint8_t lsc_interrupt = 1; /* enabled by default */
345 * Enable device removal notification.
347 uint8_t rmv_interrupt = 1; /* enabled by default */
349 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
351 /* After attach, port setup is called on event or by iterator */
352 bool setup_on_probe_event = true;
354 /* Pretty printing of ethdev events */
355 static const char * const eth_event_desc[] = {
356 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
357 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
358 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
359 [RTE_ETH_EVENT_INTR_RESET] = "reset",
360 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
361 [RTE_ETH_EVENT_IPSEC] = "IPsec",
362 [RTE_ETH_EVENT_MACSEC] = "MACsec",
363 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
364 [RTE_ETH_EVENT_NEW] = "device probed",
365 [RTE_ETH_EVENT_DESTROY] = "device released",
366 [RTE_ETH_EVENT_MAX] = NULL,
370 * Display or mask Ethernet device events.
371 * Default to all events except VF_MBOX.
373 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
374 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
375 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
376 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
377 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
378 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
379 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
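/*
 * Illustrative example: to also print VF mailbox events, the mask above
 * could additionally OR in (UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX).
 */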
381 * Decide if all memory is locked for performance.
386 * NIC bypass mode configuration options.
389 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
390 /* The NIC bypass watchdog timeout. */
391 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
395 #ifdef RTE_LIBRTE_LATENCY_STATS
398 * Set when latency stats are enabled on the command line.
400 uint8_t latencystats_enabled;
403 * Lcore ID to serve latency statistics.
405 lcoreid_t latencystats_lcore_id = -1;
410 * Ethernet device configuration.
412 struct rte_eth_rxmode rx_mode = {
413 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
416 struct rte_eth_txmode tx_mode = {
417 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
420 struct rte_fdir_conf fdir_conf = {
421 .mode = RTE_FDIR_MODE_NONE,
422 .pballoc = RTE_FDIR_PBALLOC_64K,
423 .status = RTE_FDIR_REPORT_STATUS,
425 .vlan_tci_mask = 0xFFEF,
427 .src_ip = 0xFFFFFFFF,
428 .dst_ip = 0xFFFFFFFF,
431 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
432 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
434 .src_port_mask = 0xFFFF,
435 .dst_port_mask = 0xFFFF,
436 .mac_addr_byte_mask = 0xFF,
437 .tunnel_type_mask = 1,
438 .tunnel_id_mask = 0xFFFFFFFF,
443 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
445 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
446 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
448 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
449 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
451 uint16_t nb_tx_queue_stats_mappings = 0;
452 uint16_t nb_rx_queue_stats_mappings = 0;
455 * Display zero values by default for xstats
457 uint8_t xstats_hide_zero;
459 unsigned int num_sockets = 0;
460 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
462 #ifdef RTE_LIBRTE_BITRATE
463 /* Bitrate statistics */
464 struct rte_stats_bitrates *bitrate_data;
465 lcoreid_t bitrate_lcore_id;
466 uint8_t bitrate_enabled;
469 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
470 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
472 struct vxlan_encap_conf vxlan_encap_conf = {
476 .vni = "\x00\x00\x00",
478 .udp_dst = RTE_BE16(4789),
479 .ipv4_src = IPv4(127, 0, 0, 1),
480 .ipv4_dst = IPv4(255, 255, 255, 255),
481 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
482 "\x00\x00\x00\x00\x00\x00\x00\x01",
483 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
484 "\x00\x00\x00\x00\x00\x00\x11\x11",
488 .eth_src = "\x00\x00\x00\x00\x00\x00",
489 .eth_dst = "\xff\xff\xff\xff\xff\xff",
492 struct nvgre_encap_conf nvgre_encap_conf = {
495 .tni = "\x00\x00\x00",
496 .ipv4_src = IPv4(127, 0, 0, 1),
497 .ipv4_dst = IPv4(255, 255, 255, 255),
498 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
499 "\x00\x00\x00\x00\x00\x00\x00\x01",
500 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
501 "\x00\x00\x00\x00\x00\x00\x11\x11",
503 .eth_src = "\x00\x00\x00\x00\x00\x00",
504 .eth_dst = "\xff\xff\xff\xff\xff\xff",
507 /* Forward function declarations */
508 static void setup_attached_port(portid_t pi);
509 static void map_port_queue_stats_mapping_registers(portid_t pi,
510 struct rte_port *port);
511 static void check_all_ports_link_status(uint32_t port_mask);
512 static int eth_event_callback(portid_t port_id,
513 enum rte_eth_event_type type,
514 void *param, void *ret_param);
515 static void dev_event_callback(const char *device_name,
516 enum rte_dev_event_type type,
520 * Check if all the ports are started.
521 * If yes, return positive value. If not, return zero.
523 static int all_ports_started(void);
525 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
526 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
529 * Helper function to check whether a socket has not been discovered yet.
530 * If the socket ID is new, return a positive value; otherwise return zero.
533 new_socket_id(unsigned int socket_id)
537 for (i = 0; i < num_sockets; i++) {
538 if (socket_ids[i] == socket_id)
545 * Set up the default configuration.
548 set_default_fwd_lcores_config(void)
552 unsigned int sock_num;
555 for (i = 0; i < RTE_MAX_LCORE; i++) {
556 if (!rte_lcore_is_enabled(i))
558 sock_num = rte_lcore_to_socket_id(i);
559 if (new_socket_id(sock_num)) {
560 if (num_sockets >= RTE_MAX_NUMA_NODES) {
561 rte_exit(EXIT_FAILURE,
562 "Total sockets greater than %u\n",
565 socket_ids[num_sockets++] = sock_num;
567 if (i == rte_get_master_lcore())
569 fwd_lcores_cpuids[nb_lc++] = i;
571 nb_lcores = (lcoreid_t) nb_lc;
572 nb_cfg_lcores = nb_lcores;
577 set_def_peer_eth_addrs(void)
581 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
582 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
583 peer_eth_addrs[i].addr_bytes[5] = i;
588 set_default_fwd_ports_config(void)
593 RTE_ETH_FOREACH_DEV(pt_id) {
594 fwd_ports_ids[i++] = pt_id;
596 /* Update sockets info according to the attached device */
597 int socket_id = rte_eth_dev_socket_id(pt_id);
598 if (socket_id >= 0 && new_socket_id(socket_id)) {
599 if (num_sockets >= RTE_MAX_NUMA_NODES) {
600 rte_exit(EXIT_FAILURE,
601 "Total sockets greater than %u\n",
604 socket_ids[num_sockets++] = socket_id;
608 nb_cfg_ports = nb_ports;
609 nb_fwd_ports = nb_ports;
613 set_def_fwd_config(void)
615 set_default_fwd_lcores_config();
616 set_def_peer_eth_addrs();
617 set_default_fwd_ports_config();
620 /* extremely pessimistic estimation of memory required to create a mempool */
622 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
624 unsigned int n_pages, mbuf_per_pg, leftover;
625 uint64_t total_mem, mbuf_mem, obj_sz;
627 /* there is no good way to predict how much space the mempool will
628 * occupy because it will allocate chunks on the fly, and some of those
629 * will come from default DPDK memory while some will come from our
630 * external memory, so just assume 128MB will be enough for everyone.
632 uint64_t hdr_mem = 128 << 20;
634 /* account for possible non-contiguousness */
635 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
637 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
641 mbuf_per_pg = pgsz / obj_sz;
642 leftover = (nb_mbufs % mbuf_per_pg) > 0;
643 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
645 mbuf_mem = n_pages * pgsz;
647 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
649 if (total_mem > SIZE_MAX) {
650 TESTPMD_LOG(ERR, "Memory size too big\n");
653 *out = (size_t)total_mem;
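/*
 * Worked example (illustrative figures only): with 2 MiB pages and an
 * object size of 2560 bytes reported by rte_mempool_calc_obj_size(),
 * mbuf_per_pg = 2097152 / 2560 = 819. For nb_mbufs = 100000 this gives
 * n_pages = 100000 / 819 + 1 = 123, so mbuf_mem = 123 * 2 MiB = 246 MiB
 * and total_mem = 128 MiB + 246 MiB = 374 MiB (already 2 MiB-aligned).
 */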
659 pagesz_flags(uint64_t page_sz)
661 /* as per the mmap() manpage, huge page sizes are encoded as the log2 of
662 * the page size shifted left by MAP_HUGE_SHIFT
664 int log2 = rte_log2_u64(page_sz);
666 return (log2 << HUGE_SHIFT);
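/*
 * Worked example: for 2 MiB pages, rte_log2_u64(2097152) == 21, so this
 * returns 21 << 26, which equals MAP_HUGE_2MB on Linux; for 1 GiB pages
 * it returns 30 << 26 (MAP_HUGE_1GB).
 */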
670 alloc_mem(size_t memsz, size_t pgsz, bool huge)
675 /* allocate anonymous hugepages */
676 flags = MAP_ANONYMOUS | MAP_PRIVATE;
678 flags |= HUGE_FLAG | pagesz_flags(pgsz);
680 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
681 if (addr == MAP_FAILED)
687 struct extmem_param {
691 rte_iova_t *iova_table;
692 unsigned int iova_table_len;
696 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
699 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
700 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
701 unsigned int cur_page, n_pages, pgsz_idx;
702 size_t mem_sz, cur_pgsz;
703 rte_iova_t *iovas = NULL;
707 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
708 /* skip anything that is too big */
709 if (pgsizes[pgsz_idx] > SIZE_MAX)
712 cur_pgsz = pgsizes[pgsz_idx];
714 /* if we were told not to allocate hugepages, override */
716 cur_pgsz = sysconf(_SC_PAGESIZE);
718 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
720 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
724 /* allocate our memory */
725 addr = alloc_mem(mem_sz, cur_pgsz, huge);
727 /* if we couldn't allocate memory with a specified page size,
728 * that doesn't mean we can't do it with other page sizes, so
734 /* store IOVA addresses for every page in this memory area */
735 n_pages = mem_sz / cur_pgsz;
737 iovas = malloc(sizeof(*iovas) * n_pages);
740 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
743 /* lock memory if it's not huge pages */
747 /* populate IOVA addresses */
748 for (cur_page = 0; cur_page < n_pages; cur_page++) {
753 offset = cur_pgsz * cur_page;
754 cur = RTE_PTR_ADD(addr, offset);
756 /* touch the page before getting its IOVA */
757 *(volatile char *)cur = 0;
759 iova = rte_mem_virt2iova(cur);
761 iovas[cur_page] = iova;
766 /* if we couldn't allocate anything */
772 param->pgsz = cur_pgsz;
773 param->iova_table = iovas;
774 param->iova_table_len = n_pages;
781 munmap(addr, mem_sz);
787 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
789 struct extmem_param param;
792 memset(&param, 0, sizeof(param));
794 /* check if our heap exists */
795 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
797 /* create our heap */
798 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
800 TESTPMD_LOG(ERR, "Cannot create heap\n");
805 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
807 TESTPMD_LOG(ERR, "Cannot create memory area\n");
811 /* we now have a valid memory area, so add it to heap */
812 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
813 param.addr, param.len, param.iova_table,
814 param.iova_table_len, param.pgsz);
816 /* when using VFIO, memory is automatically mapped for DMA by EAL */
818 /* not needed any more */
819 free(param.iova_table);
822 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
823 munmap(param.addr, param.len);
829 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
836 * Configuration initialisation done once at init time.
839 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
840 unsigned int socket_id)
842 char pool_name[RTE_MEMPOOL_NAMESIZE];
843 struct rte_mempool *rte_mp = NULL;
846 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
847 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
850 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
851 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
853 switch (mp_alloc_type) {
854 case MP_ALLOC_NATIVE:
856 /* wrapper to rte_mempool_create() */
857 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
858 rte_mbuf_best_mempool_ops());
859 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
860 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
865 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
866 mb_size, (unsigned int) mb_mempool_cache,
867 sizeof(struct rte_pktmbuf_pool_private),
872 if (rte_mempool_populate_anon(rte_mp) == 0) {
873 rte_mempool_free(rte_mp);
877 rte_pktmbuf_pool_init(rte_mp, NULL);
878 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
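/* The sequence above mirrors what rte_pktmbuf_pool_create() does
 * internally: create an empty pool, populate it (here from anonymous
 * memory instead of regular DPDK memory), initialize the pool private
 * area with rte_pktmbuf_pool_init(), then each mbuf with
 * rte_pktmbuf_init().
 */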
882 case MP_ALLOC_XMEM_HUGE:
885 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
887 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
888 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
891 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
893 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
895 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
896 rte_mbuf_best_mempool_ops());
897 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
898 mb_mempool_cache, 0, mbuf_seg_size,
904 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
909 if (rte_mp == NULL) {
910 rte_exit(EXIT_FAILURE,
911 "Creation of mbuf pool for socket %u failed: %s\n",
912 socket_id, rte_strerror(rte_errno));
913 } else if (verbose_level > 0) {
914 rte_mempool_dump(stdout, rte_mp);
919 * Check whether a given socket ID is valid in NUMA mode.
920 * If valid, return 0; otherwise return -1.
923 check_socket_id(const unsigned int socket_id)
925 static int warning_once = 0;
927 if (new_socket_id(socket_id)) {
928 if (!warning_once && numa_support)
929 printf("Warning: NUMA should be configured manually by"
930 " using --port-numa-config and"
931 " --ring-numa-config parameters along with"
940 * Get the allowed maximum number of RX queues.
941 * *pid returns the port ID that has the minimal value of
942 * max_rx_queues among all ports.
945 get_allowed_max_nb_rxq(portid_t *pid)
947 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
949 struct rte_eth_dev_info dev_info;
951 RTE_ETH_FOREACH_DEV(pi) {
952 rte_eth_dev_info_get(pi, &dev_info);
953 if (dev_info.max_rx_queues < allowed_max_rxq) {
954 allowed_max_rxq = dev_info.max_rx_queues;
958 return allowed_max_rxq;
962 * Check whether the requested number of RX queues is valid.
963 * It is valid if it does not exceed the maximum number of
964 * RX queues of any port.
965 * If valid, return 0; otherwise return -1.
968 check_nb_rxq(queueid_t rxq)
970 queueid_t allowed_max_rxq;
973 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
974 if (rxq > allowed_max_rxq) {
975 printf("Fail: input rxq (%u) can't be greater "
976 "than max_rx_queues (%u) of port %u\n",
986 * Get the allowed maximum number of TX queues.
987 * *pid returns the port ID that has the minimal value of
988 * max_tx_queues among all ports.
991 get_allowed_max_nb_txq(portid_t *pid)
993 queueid_t allowed_max_txq = MAX_QUEUE_ID;
995 struct rte_eth_dev_info dev_info;
997 RTE_ETH_FOREACH_DEV(pi) {
998 rte_eth_dev_info_get(pi, &dev_info);
999 if (dev_info.max_tx_queues < allowed_max_txq) {
1000 allowed_max_txq = dev_info.max_tx_queues;
1004 return allowed_max_txq;
1008 * Check whether the requested number of TX queues is valid.
1009 * It is valid if it does not exceed the maximum number of
1010 * TX queues of any port.
1011 * If valid, return 0; otherwise return -1.
1014 check_nb_txq(queueid_t txq)
1016 queueid_t allowed_max_txq;
1019 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1020 if (txq > allowed_max_txq) {
1021 printf("Fail: input txq (%u) can't be greater "
1022 "than max_tx_queues (%u) of port %u\n",
1035 struct rte_port *port;
1036 struct rte_mempool *mbp;
1037 unsigned int nb_mbuf_per_pool;
1039 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1040 struct rte_gro_param gro_param;
1044 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1046 /* Configuration of logical cores. */
1047 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1048 sizeof(struct fwd_lcore *) * nb_lcores,
1049 RTE_CACHE_LINE_SIZE);
1050 if (fwd_lcores == NULL) {
1051 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1052 "failed\n", nb_lcores);
1054 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1055 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1056 sizeof(struct fwd_lcore),
1057 RTE_CACHE_LINE_SIZE);
1058 if (fwd_lcores[lc_id] == NULL) {
1059 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1062 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1065 RTE_ETH_FOREACH_DEV(pid) {
1067 /* Apply default TxRx configuration for all ports */
1068 port->dev_conf.txmode = tx_mode;
1069 port->dev_conf.rxmode = rx_mode;
1070 rte_eth_dev_info_get(pid, &port->dev_info);
1072 if (!(port->dev_info.tx_offload_capa &
1073 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1074 port->dev_conf.txmode.offloads &=
1075 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1076 if (!(port->dev_info.tx_offload_capa &
1077 DEV_TX_OFFLOAD_MATCH_METADATA))
1078 port->dev_conf.txmode.offloads &=
1079 ~DEV_TX_OFFLOAD_MATCH_METADATA;
1081 if (port_numa[pid] != NUMA_NO_CONFIG)
1082 port_per_socket[port_numa[pid]]++;
1084 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1087 * if socket_id is invalid,
1088 * set to the first available socket.
1090 if (check_socket_id(socket_id) < 0)
1091 socket_id = socket_ids[0];
1092 port_per_socket[socket_id]++;
1096 /* Apply Rx offloads configuration */
1097 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1098 port->rx_conf[k].offloads =
1099 port->dev_conf.rxmode.offloads;
1100 /* Apply Tx offloads configuration */
1101 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1102 port->tx_conf[k].offloads =
1103 port->dev_conf.txmode.offloads;
1105 /* set flag to initialize port/queue */
1106 port->need_reconfig = 1;
1107 port->need_reconfig_queues = 1;
1108 port->tx_metadata = 0;
1112 * Create mbuf pools.
1113 * If NUMA support is disabled, create a single mbuf pool in
1114 * socket 0 memory by default.
1115 * Otherwise, create an mbuf pool in the memory of each of sockets 0 and 1.
1117 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1118 * nb_txd can be configured at run time.
1120 if (param_total_num_mbufs)
1121 nb_mbuf_per_pool = param_total_num_mbufs;
1123 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1124 (nb_lcores * mb_mempool_cache) +
1125 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1126 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
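/*
 * Worked example (assuming the usual testpmd defaults of
 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048, MAX_PKT_BURST = 512,
 * mb_mempool_cache = 250 and RTE_MAX_ETHPORTS = 32): with 4 lcores,
 * nb_mbuf_per_pool = 2048 + 4 * 250 + 2048 + 512 = 5608, and after
 * scaling, 5608 * 32 = 179456 mbufs per pool.
 */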
1132 for (i = 0; i < num_sockets; i++)
1133 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1136 if (socket_num == UMA_NO_CONFIG)
1137 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
1139 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1145 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1146 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1148 * Record which mbuf pool each logical core should use, if needed.
1150 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1151 mbp = mbuf_pool_find(
1152 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1155 mbp = mbuf_pool_find(0);
1156 fwd_lcores[lc_id]->mbp = mbp;
1157 /* initialize GSO context */
1158 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1159 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1160 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1161 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
1163 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1166 /* Configuration of packet forwarding streams. */
1167 if (init_fwd_streams() < 0)
1168 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1172 /* create a gro context for each lcore */
1173 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1174 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1175 gro_param.max_item_per_flow = MAX_PKT_BURST;
1176 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1177 gro_param.socket_id = rte_lcore_to_socket_id(
1178 fwd_lcores_cpuids[lc_id]);
1179 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1180 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1181 rte_exit(EXIT_FAILURE,
1182 "rte_gro_ctx_create() failed\n");
1186 #if defined RTE_LIBRTE_PMD_SOFTNIC
1187 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1188 RTE_ETH_FOREACH_DEV(pid) {
1190 const char *driver = port->dev_info.driver_name;
1192 if (strcmp(driver, "net_softnic") == 0)
1193 port->softport.fwd_lcore_arg = fwd_lcores;
1202 reconfig(portid_t new_port_id, unsigned socket_id)
1204 struct rte_port *port;
1206 /* Reconfiguration of Ethernet ports. */
1207 port = &ports[new_port_id];
1208 rte_eth_dev_info_get(new_port_id, &port->dev_info);
1210 /* set flag to initialize port/queue */
1211 port->need_reconfig = 1;
1212 port->need_reconfig_queues = 1;
1213 port->socket_id = socket_id;
1220 init_fwd_streams(void)
1223 struct rte_port *port;
1224 streamid_t sm_id, nb_fwd_streams_new;
1227 /* set the socket ID according to the NUMA configuration */
1228 RTE_ETH_FOREACH_DEV(pid) {
1230 if (nb_rxq > port->dev_info.max_rx_queues) {
1231 printf("Fail: nb_rxq(%d) is greater than "
1232 "max_rx_queues(%d)\n", nb_rxq,
1233 port->dev_info.max_rx_queues);
1236 if (nb_txq > port->dev_info.max_tx_queues) {
1237 printf("Fail: nb_txq(%d) is greater than "
1238 "max_tx_queues(%d)\n", nb_txq,
1239 port->dev_info.max_tx_queues);
1243 if (port_numa[pid] != NUMA_NO_CONFIG)
1244 port->socket_id = port_numa[pid];
1246 port->socket_id = rte_eth_dev_socket_id(pid);
1249 * if socket_id is invalid,
1250 * set to the first available socket.
1252 if (check_socket_id(port->socket_id) < 0)
1253 port->socket_id = socket_ids[0];
1257 if (socket_num == UMA_NO_CONFIG)
1258 port->socket_id = 0;
1260 port->socket_id = socket_num;
1264 q = RTE_MAX(nb_rxq, nb_txq);
1266 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1269 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1270 if (nb_fwd_streams_new == nb_fwd_streams)
1273 if (fwd_streams != NULL) {
1274 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1275 if (fwd_streams[sm_id] == NULL)
1277 rte_free(fwd_streams[sm_id]);
1278 fwd_streams[sm_id] = NULL;
1280 rte_free(fwd_streams);
1285 nb_fwd_streams = nb_fwd_streams_new;
1286 if (nb_fwd_streams) {
1287 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1288 sizeof(struct fwd_stream *) * nb_fwd_streams,
1289 RTE_CACHE_LINE_SIZE);
1290 if (fwd_streams == NULL)
1291 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1292 " (struct fwd_stream *)) failed\n",
1295 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1296 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1297 " struct fwd_stream", sizeof(struct fwd_stream),
1298 RTE_CACHE_LINE_SIZE);
1299 if (fwd_streams[sm_id] == NULL)
1300 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1301 "(struct fwd_stream) failed\n");
1308 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1310 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1312 unsigned int total_burst;
1313 unsigned int nb_burst;
1314 unsigned int burst_stats[3];
1315 uint16_t pktnb_stats[3];
1317 int burst_percent[3];
1320 * First compute the total number of packet bursts and the
1321 * two highest numbers of bursts of the same number of packets.
1324 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1325 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1326 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1327 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1330 total_burst += nb_burst;
1331 if (nb_burst > burst_stats[0]) {
1332 burst_stats[1] = burst_stats[0];
1333 pktnb_stats[1] = pktnb_stats[0];
1334 burst_stats[0] = nb_burst;
1335 pktnb_stats[0] = nb_pkt;
1336 } else if (nb_burst > burst_stats[1]) {
1337 burst_stats[1] = nb_burst;
1338 pktnb_stats[1] = nb_pkt;
1341 if (total_burst == 0)
1343 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1344 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1345 burst_percent[0], (int) pktnb_stats[0]);
1346 if (burst_stats[0] == total_burst) {
1350 if (burst_stats[0] + burst_stats[1] == total_burst) {
1351 printf(" + %d%% of %d pkts]\n",
1352 100 - burst_percent[0], pktnb_stats[1]);
1355 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1356 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1357 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1358 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1361 printf(" + %d%% of %d pkts + %d%% of others]\n",
1362 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1364 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1367 fwd_stream_stats_display(streamid_t stream_id)
1369 struct fwd_stream *fs;
1370 static const char *fwd_top_stats_border = "-------";
1372 fs = fwd_streams[stream_id];
1373 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1374 (fs->fwd_dropped == 0))
1376 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1377 "TX Port=%2d/Queue=%2d %s\n",
1378 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1379 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1380 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1381 " TX-dropped: %-14"PRIu64,
1382 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1384 /* if checksum mode */
1385 if (cur_fwd_eng == &csum_fwd_engine) {
1386 printf(" RX- bad IP checksum: %-14"PRIu64
1387 " Rx- bad L4 checksum: %-14"PRIu64
1388 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1389 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1390 fs->rx_bad_outer_l4_csum);
1395 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1396 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1397 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1402 fwd_stats_display(void)
1404 static const char *fwd_stats_border = "----------------------";
1405 static const char *acc_stats_border = "+++++++++++++++";
1407 struct fwd_stream *rx_stream;
1408 struct fwd_stream *tx_stream;
1409 uint64_t tx_dropped;
1410 uint64_t rx_bad_ip_csum;
1411 uint64_t rx_bad_l4_csum;
1412 uint64_t rx_bad_outer_l4_csum;
1413 } ports_stats[RTE_MAX_ETHPORTS];
1414 uint64_t total_rx_dropped = 0;
1415 uint64_t total_tx_dropped = 0;
1416 uint64_t total_rx_nombuf = 0;
1417 struct rte_eth_stats stats;
1418 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1419 uint64_t fwd_cycles = 0;
1421 uint64_t total_recv = 0;
1422 uint64_t total_xmit = 0;
1423 struct rte_port *port;
1428 memset(ports_stats, 0, sizeof(ports_stats));
1430 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1431 struct fwd_stream *fs = fwd_streams[sm_id];
1433 if (cur_fwd_config.nb_fwd_streams >
1434 cur_fwd_config.nb_fwd_ports) {
1435 fwd_stream_stats_display(sm_id);
1437 ports_stats[fs->tx_port].tx_stream = fs;
1438 ports_stats[fs->rx_port].rx_stream = fs;
1441 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1443 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1444 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1445 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1446 fs->rx_bad_outer_l4_csum;
1448 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1449 fwd_cycles += fs->core_cycles;
1452 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1455 pt_id = fwd_ports_ids[i];
1456 port = &ports[pt_id];
1458 rte_eth_stats_get(pt_id, &stats);
1459 stats.ipackets -= port->stats.ipackets;
1460 stats.opackets -= port->stats.opackets;
1461 stats.ibytes -= port->stats.ibytes;
1462 stats.obytes -= port->stats.obytes;
1463 stats.imissed -= port->stats.imissed;
1464 stats.oerrors -= port->stats.oerrors;
1465 stats.rx_nombuf -= port->stats.rx_nombuf;
1467 total_recv += stats.ipackets;
1468 total_xmit += stats.opackets;
1469 total_rx_dropped += stats.imissed;
1470 total_tx_dropped += ports_stats[pt_id].tx_dropped;
1471 total_tx_dropped += stats.oerrors;
1472 total_rx_nombuf += stats.rx_nombuf;
1474 printf("\n %s Forward statistics for port %-2d %s\n",
1475 fwd_stats_border, pt_id, fwd_stats_border);
1477 if (!port->rx_queue_stats_mapping_enabled &&
1478 !port->tx_queue_stats_mapping_enabled) {
1479 printf(" RX-packets: %-14"PRIu64
1480 " RX-dropped: %-14"PRIu64
1481 "RX-total: %-"PRIu64"\n",
1482 stats.ipackets, stats.imissed,
1483 stats.ipackets + stats.imissed);
1485 if (cur_fwd_eng == &csum_fwd_engine)
1486 printf(" Bad-ipcsum: %-14"PRIu64
1487 " Bad-l4csum: %-14"PRIu64
1488 "Bad-outer-l4csum: %-14"PRIu64"\n",
1489 ports_stats[pt_id].rx_bad_ip_csum,
1490 ports_stats[pt_id].rx_bad_l4_csum,
1491 ports_stats[pt_id].rx_bad_outer_l4_csum);
1492 if (stats.ierrors + stats.rx_nombuf > 0) {
1493 printf(" RX-error: %-"PRIu64"\n",
1495 printf(" RX-nombufs: %-14"PRIu64"\n",
1499 printf(" TX-packets: %-14"PRIu64
1500 " TX-dropped: %-14"PRIu64
1501 "TX-total: %-"PRIu64"\n",
1502 stats.opackets, ports_stats[pt_id].tx_dropped,
1503 stats.opackets + ports_stats[pt_id].tx_dropped);
1505 printf(" RX-packets: %14"PRIu64
1506 " RX-dropped:%14"PRIu64
1507 " RX-total:%14"PRIu64"\n",
1508 stats.ipackets, stats.imissed,
1509 stats.ipackets + stats.imissed);
1511 if (cur_fwd_eng == &csum_fwd_engine)
1512 printf(" Bad-ipcsum:%14"PRIu64
1513 " Bad-l4csum:%14"PRIu64
1514 " Bad-outer-l4csum: %-14"PRIu64"\n",
1515 ports_stats[pt_id].rx_bad_ip_csum,
1516 ports_stats[pt_id].rx_bad_l4_csum,
1517 ports_stats[pt_id].rx_bad_outer_l4_csum);
1518 if ((stats.ierrors + stats.rx_nombuf) > 0) {
1519 printf(" RX-error:%"PRIu64"\n", stats.ierrors);
1520 printf(" RX-nombufs: %14"PRIu64"\n",
1524 printf(" TX-packets: %14"PRIu64
1525 " TX-dropped:%14"PRIu64
1526 " TX-total:%14"PRIu64"\n",
1527 stats.opackets, ports_stats[pt_id].tx_dropped,
1528 stats.opackets + ports_stats[pt_id].tx_dropped);
1531 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1532 if (ports_stats[pt_id].rx_stream)
1533 pkt_burst_stats_display("RX",
1534 &ports_stats[pt_id].rx_stream->rx_burst_stats);
1535 if (ports_stats[pt_id].tx_stream)
1536 pkt_burst_stats_display("TX",
1537 &ports_stats[pt_id].tx_stream->tx_burst_stats);
1540 if (port->rx_queue_stats_mapping_enabled) {
1542 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1543 printf(" Stats reg %2d RX-packets:%14"PRIu64
1544 " RX-errors:%14"PRIu64
1545 " RX-bytes:%14"PRIu64"\n",
1546 j, stats.q_ipackets[j],
1547 stats.q_errors[j], stats.q_ibytes[j]);
1551 if (port->tx_queue_stats_mapping_enabled) {
1552 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1553 printf(" Stats reg %2d TX-packets:%14"PRIu64
1556 j, stats.q_opackets[j],
1561 printf(" %s--------------------------------%s\n",
1562 fwd_stats_border, fwd_stats_border);
1565 printf("\n %s Accumulated forward statistics for all ports"
1567 acc_stats_border, acc_stats_border);
1568 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1570 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1572 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1573 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1574 if (total_rx_nombuf > 0)
1575 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1576 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1578 acc_stats_border, acc_stats_border);
1579 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1581 printf("\n CPU cycles/packet=%u (total cycles="
1582 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1583 (unsigned int)(fwd_cycles / total_recv),
1584 fwd_cycles, total_recv);
1589 fwd_stats_reset(void)
1595 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1596 pt_id = fwd_ports_ids[i];
1597 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
1599 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1600 struct fwd_stream *fs = fwd_streams[sm_id];
1604 fs->fwd_dropped = 0;
1605 fs->rx_bad_ip_csum = 0;
1606 fs->rx_bad_l4_csum = 0;
1607 fs->rx_bad_outer_l4_csum = 0;
1609 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1610 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
1611 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
1613 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1614 fs->core_cycles = 0;
1620 flush_fwd_rx_queues(void)
1622 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1629 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1630 uint64_t timer_period;
1632 /* convert to number of cycles */
1633 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1635 for (j = 0; j < 2; j++) {
1636 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1637 for (rxq = 0; rxq < nb_rxq; rxq++) {
1638 port_id = fwd_ports_ids[rxp];
1640 * testpmd can get stuck in the do-while loop below
1641 * if rte_eth_rx_burst() always returns a nonzero
1642 * number of packets, so a timer is added to exit
1643 * the loop after a one-second expiry.
1645 prev_tsc = rte_rdtsc();
1647 nb_rx = rte_eth_rx_burst(port_id, rxq,
1648 pkts_burst, MAX_PKT_BURST);
1649 for (i = 0; i < nb_rx; i++)
1650 rte_pktmbuf_free(pkts_burst[i]);
1652 cur_tsc = rte_rdtsc();
1653 diff_tsc = cur_tsc - prev_tsc;
1654 timer_tsc += diff_tsc;
1655 } while ((nb_rx > 0) &&
1656 (timer_tsc < timer_period));
1660 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1665 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1667 struct fwd_stream **fsm;
1670 #ifdef RTE_LIBRTE_BITRATE
1671 uint64_t tics_per_1sec;
1672 uint64_t tics_datum;
1673 uint64_t tics_current;
1674 uint16_t i, cnt_ports;
1676 cnt_ports = nb_ports;
1677 tics_datum = rte_rdtsc();
1678 tics_per_1sec = rte_get_timer_hz();
1680 fsm = &fwd_streams[fc->stream_idx];
1681 nb_fs = fc->stream_nb;
1683 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1684 (*pkt_fwd)(fsm[sm_id]);
1685 #ifdef RTE_LIBRTE_BITRATE
1686 if (bitrate_enabled != 0 &&
1687 bitrate_lcore_id == rte_lcore_id()) {
1688 tics_current = rte_rdtsc();
1689 if (tics_current - tics_datum >= tics_per_1sec) {
1690 /* Periodic bitrate calculation */
1691 for (i = 0; i < cnt_ports; i++)
1692 rte_stats_bitrate_calc(bitrate_data,
1694 tics_datum = tics_current;
1698 #ifdef RTE_LIBRTE_LATENCY_STATS
1699 if (latencystats_enabled != 0 &&
1700 latencystats_lcore_id == rte_lcore_id())
1701 rte_latencystats_update();
1704 } while (!fc->stopped);
1708 start_pkt_forward_on_core(void *fwd_arg)
1710 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1711 cur_fwd_config.fwd_eng->packet_fwd);
1716 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1717 * Used to start communication flows in network loopback test configurations.
1720 run_one_txonly_burst_on_core(void *fwd_arg)
1722 struct fwd_lcore *fwd_lc;
1723 struct fwd_lcore tmp_lcore;
1725 fwd_lc = (struct fwd_lcore *) fwd_arg;
1726 tmp_lcore = *fwd_lc;
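/* With stopped already set, run_pkt_fwd_on_lcore() executes its do-while
 * body exactly once, i.e. a single burst per forwarding stream.
 */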
1727 tmp_lcore.stopped = 1;
1728 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1733 * Launch packet forwarding:
1734 * - Set up the per-port forwarding context.
1735 * - Launch logical cores with their forwarding configuration.
1738 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1740 port_fwd_begin_t port_fwd_begin;
1745 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1746 if (port_fwd_begin != NULL) {
1747 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1748 (*port_fwd_begin)(fwd_ports_ids[i]);
1750 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1751 lc_id = fwd_lcores_cpuids[i];
1752 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1753 fwd_lcores[i]->stopped = 0;
1754 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1755 fwd_lcores[i], lc_id);
1757 printf("launch lcore %u failed - diag=%d\n",
1764 * Launch packet forwarding configuration.
1767 start_packet_forwarding(int with_tx_first)
1769 port_fwd_begin_t port_fwd_begin;
1770 port_fwd_end_t port_fwd_end;
1771 struct rte_port *port;
1775 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1776 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1778 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1779 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1781 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1782 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1783 (!nb_rxq || !nb_txq))
1784 rte_exit(EXIT_FAILURE,
1785 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1786 cur_fwd_eng->fwd_mode_name);
1788 if (all_ports_started() == 0) {
1789 printf("Not all ports were started\n");
1792 if (test_done == 0) {
1793 printf("Packet forwarding already started\n");
1799 for (i = 0; i < nb_fwd_ports; i++) {
1800 pt_id = fwd_ports_ids[i];
1801 port = &ports[pt_id];
1802 if (!port->dcb_flag) {
1803 printf("In DCB mode, all forwarding ports must "
1804 "be configured in this mode.\n");
1808 if (nb_fwd_lcores == 1) {
1809 printf("In DCB mode,the nb forwarding cores "
1810 "should be larger than 1.\n");
1819 flush_fwd_rx_queues();
1821 pkt_fwd_config_display(&cur_fwd_config);
1822 rxtx_config_display();
1825 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1826 pt_id = fwd_ports_ids[i];
1827 port = &ports[pt_id];
1828 map_port_queue_stats_mapping_registers(pt_id, port);
1830 if (with_tx_first) {
1831 port_fwd_begin = tx_only_engine.port_fwd_begin;
1832 if (port_fwd_begin != NULL) {
1833 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1834 (*port_fwd_begin)(fwd_ports_ids[i]);
1836 while (with_tx_first--) {
1837 launch_packet_forwarding(
1838 run_one_txonly_burst_on_core);
1839 rte_eal_mp_wait_lcore();
1841 port_fwd_end = tx_only_engine.port_fwd_end;
1842 if (port_fwd_end != NULL) {
1843 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1844 (*port_fwd_end)(fwd_ports_ids[i]);
1847 launch_packet_forwarding(start_pkt_forward_on_core);
1851 stop_packet_forwarding(void)
1853 port_fwd_end_t port_fwd_end;
1859 printf("Packet forwarding not started\n");
1862 printf("Telling cores to stop...");
1863 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1864 fwd_lcores[lc_id]->stopped = 1;
1865 printf("\nWaiting for lcores to finish...\n");
1866 rte_eal_mp_wait_lcore();
1867 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1868 if (port_fwd_end != NULL) {
1869 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1870 pt_id = fwd_ports_ids[i];
1871 (*port_fwd_end)(pt_id);
1875 fwd_stats_display();
1877 printf("\nDone.\n");
1882 dev_set_link_up(portid_t pid)
1884 if (rte_eth_dev_set_link_up(pid) < 0)
1885 printf("\nSet link up fail.\n");
1889 dev_set_link_down(portid_t pid)
1891 if (rte_eth_dev_set_link_down(pid) < 0)
1892 printf("\nSet link down fail.\n");
1896 all_ports_started(void)
1899 struct rte_port *port;
1901 RTE_ETH_FOREACH_DEV(pi) {
1903 /* Check if there is a port which is not started */
1904 if ((port->port_status != RTE_PORT_STARTED) &&
1905 (port->slave_flag == 0))
1909 /* All ports are started */
1914 port_is_stopped(portid_t port_id)
1916 struct rte_port *port = &ports[port_id];
1918 if ((port->port_status != RTE_PORT_STOPPED) &&
1919 (port->slave_flag == 0))
1925 all_ports_stopped(void)
1929 RTE_ETH_FOREACH_DEV(pi) {
1930 if (!port_is_stopped(pi))
1938 port_is_started(portid_t port_id)
1940 if (port_id_is_invalid(port_id, ENABLED_WARN))
1943 if (ports[port_id].port_status != RTE_PORT_STARTED)
1950 start_port(portid_t pid)
1952 int diag, need_check_link_status = -1;
1955 struct rte_port *port;
1956 struct ether_addr mac_addr;
1958 if (port_id_is_invalid(pid, ENABLED_WARN))
1963 RTE_ETH_FOREACH_DEV(pi) {
1964 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1967 need_check_link_status = 0;
1969 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1970 RTE_PORT_HANDLING) == 0) {
1971 printf("Port %d is now not stopped\n", pi);
1975 if (port->need_reconfig > 0) {
1976 port->need_reconfig = 0;
1978 if (flow_isolate_all) {
1979 int ret = port_flow_isolate(pi, 1);
1981 printf("Failed to apply isolated"
1982 " mode on port %d\n", pi);
1986 configure_rxtx_dump_callbacks(0);
1987 printf("Configuring Port %d (socket %u)\n", pi,
1989 /* configure port */
1990 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1993 if (rte_atomic16_cmpset(&(port->port_status),
1994 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1995 printf("Port %d can not be set back "
1996 "to stopped\n", pi);
1997 printf("Fail to configure port %d\n", pi);
1998 /* try to reconfigure port next time */
1999 port->need_reconfig = 1;
2003 if (port->need_reconfig_queues > 0) {
2004 port->need_reconfig_queues = 0;
2005 /* setup tx queues */
2006 for (qi = 0; qi < nb_txq; qi++) {
2007 if ((numa_support) &&
2008 (txring_numa[pi] != NUMA_NO_CONFIG))
2009 diag = rte_eth_tx_queue_setup(pi, qi,
2010 port->nb_tx_desc[qi],
2012 &(port->tx_conf[qi]));
2014 diag = rte_eth_tx_queue_setup(pi, qi,
2015 port->nb_tx_desc[qi],
2017 &(port->tx_conf[qi]));
2022 /* Failed to set up a TX queue; return */
2023 if (rte_atomic16_cmpset(&(port->port_status),
2025 RTE_PORT_STOPPED) == 0)
2026 printf("Port %d can not be set back "
2027 "to stopped\n", pi);
2028 printf("Fail to configure port %d tx queues\n",
2030 /* try to reconfigure queues next time */
2031 port->need_reconfig_queues = 1;
2034 for (qi = 0; qi < nb_rxq; qi++) {
2035 /* setup rx queues */
2036 if ((numa_support) &&
2037 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2038 struct rte_mempool * mp =
2039 mbuf_pool_find(rxring_numa[pi]);
2041 printf("Failed to setup RX queue:"
2042 "No mempool allocation"
2043 " on the socket %d\n",
2048 diag = rte_eth_rx_queue_setup(pi, qi,
2049 port->nb_rx_desc[qi],
2051 &(port->rx_conf[qi]),
2054 struct rte_mempool *mp =
2055 mbuf_pool_find(port->socket_id);
2057 printf("Failed to setup RX queue:"
2058 "No mempool allocation"
2059 " on the socket %d\n",
2063 diag = rte_eth_rx_queue_setup(pi, qi,
2064 port->nb_rx_desc[qi],
2066 &(port->rx_conf[qi]),
2072 /* Failed to set up an RX queue; return */
2073 if (rte_atomic16_cmpset(&(port->port_status),
2075 RTE_PORT_STOPPED) == 0)
2076 printf("Port %d can not be set back "
2077 "to stopped\n", pi);
2078 printf("Fail to configure port %d rx queues\n",
2080 /* try to reconfigure queues next time */
2081 port->need_reconfig_queues = 1;
2085 configure_rxtx_dump_callbacks(verbose_level);
2087 if (rte_eth_dev_start(pi) < 0) {
2088 printf("Fail to start port %d\n", pi);
2090 /* Failed to start the port; set it back to stopped */
2091 if (rte_atomic16_cmpset(&(port->port_status),
2092 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2093 printf("Port %d can not be set back to "
2098 if (rte_atomic16_cmpset(&(port->port_status),
2099 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2100 printf("Port %d can not be set into started\n", pi);
2102 rte_eth_macaddr_get(pi, &mac_addr);
2103 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2104 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2105 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2106 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2108 /* at least one port started; need to check link status */
2109 need_check_link_status = 1;
2112 if (need_check_link_status == 1 && !no_link_check)
2113 check_all_ports_link_status(RTE_PORT_ALL);
2114 else if (need_check_link_status == 0)
2115 printf("Please stop the ports first\n");
2122 stop_port(portid_t pid)
2125 struct rte_port *port;
2126 int need_check_link_status = 0;
2133 if (port_id_is_invalid(pid, ENABLED_WARN))
2136 printf("Stopping ports...\n");
2138 RTE_ETH_FOREACH_DEV(pi) {
2139 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2142 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2143 printf("Please remove port %d from forwarding configuration.\n", pi);
2147 if (port_is_bonding_slave(pi)) {
2148 printf("Please remove port %d from bonded device.\n", pi);
2153 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2154 RTE_PORT_HANDLING) == 0)
2157 rte_eth_dev_stop(pi);
2159 if (rte_atomic16_cmpset(&(port->port_status),
2160 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2161 printf("Port %d can not be set into stopped\n", pi);
2162 need_check_link_status = 1;
2164 if (need_check_link_status && !no_link_check)
2165 check_all_ports_link_status(RTE_PORT_ALL);
2171 remove_invalid_ports_in(portid_t *array, portid_t *total)
2174 portid_t new_total = 0;
2176 for (i = 0; i < *total; i++)
2177 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2178 array[new_total] = array[i];
2185 remove_invalid_ports(void)
2187 remove_invalid_ports_in(ports_ids, &nb_ports);
2188 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2189 nb_cfg_ports = nb_fwd_ports;
2193 close_port(portid_t pid)
2196 struct rte_port *port;
2198 if (port_id_is_invalid(pid, ENABLED_WARN))
2201 printf("Closing ports...\n");
2203 RTE_ETH_FOREACH_DEV(pi) {
2204 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2207 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2208 printf("Please remove port %d from forwarding configuration.\n", pi);
2212 if (port_is_bonding_slave(pi)) {
2213 printf("Please remove port %d from bonded device.\n", pi);
2218 if (rte_atomic16_cmpset(&(port->port_status),
2219 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2220 printf("Port %d is already closed\n", pi);
2224 if (rte_atomic16_cmpset(&(port->port_status),
2225 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2226 printf("Port %d is now not stopped\n", pi);
2230 if (port->flow_list)
2231 port_flow_flush(pi);
2232 rte_eth_dev_close(pi);
2234 remove_invalid_ports();
2236 if (rte_atomic16_cmpset(&(port->port_status),
2237 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2238 printf("Port %d cannot be set to closed\n", pi);
2245 reset_port(portid_t pid)
2249 struct rte_port *port;
2251 if (port_id_is_invalid(pid, ENABLED_WARN))
2254 printf("Resetting ports...\n");
2256 RTE_ETH_FOREACH_DEV(pi) {
2257 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2260 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2261 printf("Please remove port %d from forwarding "
2262 "configuration.\n", pi);
2266 if (port_is_bonding_slave(pi)) {
2267 printf("Please remove port %d from bonded device.\n",
2272 diag = rte_eth_dev_reset(pi);
2275 port->need_reconfig = 1;
2276 port->need_reconfig_queues = 1;
2278 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2286 attach_port(char *identifier)
2289 struct rte_dev_iterator iterator;
2291 printf("Attaching a new port...\n");
2293 if (identifier == NULL) {
2294 printf("Invalid parameters are specified\n");
2298 if (rte_dev_probe(identifier) != 0) {
2299 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2303 /* first attach mode: event */
2304 if (setup_on_probe_event) {
2305 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2306 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2307 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2308 ports[pi].need_setup != 0)
2309 setup_attached_port(pi);
2313 /* second attach mode: iterator */
2314 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2315 /* setup ports matching the devargs used for probing */
2316 if (port_is_forwarding(pi))
2317 continue; /* port was already attached before */
2318 setup_attached_port(pi);
2323 setup_attached_port(portid_t pi)
2325 unsigned int socket_id;
2327 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2328 /* if socket_id is invalid, set to the first available socket. */
2329 if (check_socket_id(socket_id) < 0)
2330 socket_id = socket_ids[0];
2331 reconfig(pi, socket_id);
2332 rte_eth_promiscuous_enable(pi);
2334 ports_ids[nb_ports++] = pi;
2335 fwd_ports_ids[nb_fwd_ports++] = pi;
2336 nb_cfg_ports = nb_fwd_ports;
2337 ports[pi].need_setup = 0;
2338 ports[pi].port_status = RTE_PORT_STOPPED;
2340 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2345 detach_port_device(portid_t port_id)
2347 struct rte_device *dev;
2350 printf("Removing a device...\n");
2352 dev = rte_eth_devices[port_id].device;
2354 printf("Device already removed\n");
2358 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2359 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2360 printf("Port not stopped\n");
2363 printf("Port was not closed\n");
2364 if (ports[port_id].flow_list)
2365 port_flow_flush(port_id);
2368 if (rte_dev_remove(dev) != 0) {
2369 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2373 RTE_ETH_FOREACH_DEV_SIBLING(sibling, port_id) {
2374 /* reset mapping between old ports and removed device */
2375 rte_eth_devices[sibling].device = NULL;
2376 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2377 /* sibling ports are forced to be closed */
2378 ports[sibling].port_status = RTE_PORT_CLOSED;
2379 printf("Port %u is closed\n", sibling);
2383 remove_invalid_ports();
2385 printf("Device of port %u is detached\n", port_id);
2386 printf("Now total ports is %d\n", nb_ports);
2394 struct rte_device *device;
2399 stop_packet_forwarding();
2401 if (ports != NULL) {
2403 RTE_ETH_FOREACH_DEV(pt_id) {
2404 printf("\nStopping port %d...\n", pt_id);
2408 RTE_ETH_FOREACH_DEV(pt_id) {
2409 printf("\nShutting down port %d...\n", pt_id);
2414 * This is a workaround for a virtio-user issue that
2415 * requires calling a clean-up routine to remove existing
2417 * This workaround is valid only for testpmd and needs a fix
2418 * that is valid for all applications.
2419 * TODO: Implement proper resource cleanup
2421 device = rte_eth_devices[pt_id].device;
2422 if (device && !strcmp(device->driver->name, "net_virtio_user"))
2423 detach_port_device(pt_id);
2428 ret = rte_dev_event_monitor_stop();
2431 "fail to stop device event monitor.");
2435 ret = rte_dev_event_callback_unregister(NULL,
2436 dev_event_callback, NULL);
2439 "fail to unregister device event callback.\n");
2443 ret = rte_dev_hotplug_handle_disable();
2446 "fail to disable hotplug handling.\n");
2451 printf("\nBye...\n");
2454 typedef void (*cmd_func_t)(void);
2455 struct pmd_test_command {
2456 const char *cmd_name;
2457 cmd_func_t cmd_func;
2460 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2462 /* Check the link status of all ports within up to 9 s, and print the final status of each */
2464 check_all_ports_link_status(uint32_t port_mask)
2466 #define CHECK_INTERVAL 100 /* 100ms */
2467 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2469 uint8_t count, all_ports_up, print_flag = 0;
2470 struct rte_eth_link link;
2472 printf("Checking link statuses...\n");
2474 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2476 RTE_ETH_FOREACH_DEV(portid) {
2477 if ((port_mask & (1 << portid)) == 0)
2479 memset(&link, 0, sizeof(link));
2480 rte_eth_link_get_nowait(portid, &link);
2481 /* print link status if flag set */
2482 if (print_flag == 1) {
2483 if (link.link_status)
2485 "Port%d Link Up. speed %u Mbps- %s\n",
2486 portid, link.link_speed,
2487 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2488 ("full-duplex") : ("half-duplex\n"));
2490 printf("Port %d Link Down\n", portid);
2493 /* clear all_ports_up flag if any link down */
2494 if (link.link_status == ETH_LINK_DOWN) {
2499 /* after the final status print-out, get out */
2500 if (print_flag == 1)
2503 if (all_ports_up == 0) {
2505 rte_delay_ms(CHECK_INTERVAL);
2508 /* set the print_flag if all ports up or timeout */
2509 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
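/*
 * A condensed sketch of the same polling pattern for a single port: query
 * the link without blocking, retry every CHECK_INTERVAL ms and give up
 * after MAX_CHECK_TIME attempts (the macros defined above).
 */
static int
example_wait_link_up(portid_t pid)
{
	struct rte_eth_link link;
	uint8_t count;

	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(pid, &link);
		if (link.link_status == ETH_LINK_UP)
			return 0;
		rte_delay_ms(CHECK_INTERVAL);
	}
	return -1;	/* still down after roughly 9 s */
}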
2519 * This callback removes a single port of a device; it cannot handle
2520 * removal of multiple ports of one device.
2521 * TODO: device detach invocation is planned to move from the application
2522 * into the EAL, and all PMDs should free port resources on ethdev close.
2525 rmv_port_callback(void *arg)
2527 int need_to_start = 0;
2528 int org_no_link_check = no_link_check;
2529 portid_t port_id = (intptr_t)arg;
2531 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2533 if (!test_done && port_is_forwarding(port_id)) {
2535 stop_packet_forwarding();
2539 no_link_check = org_no_link_check;
2540 close_port(port_id);
2541 detach_port_device(port_id);
2543 start_packet_forwarding(0);
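/*
 * Sketch of the deferred-removal idiom built around the callback above:
 * a device cannot be detached from inside the interrupt callback that
 * reported its removal, so rmv_port_callback() is scheduled on the alarm
 * thread instead (the same 100000 us delay the event handlers below use).
 */
static void
example_schedule_removal(portid_t pid)
{
	if (rte_eal_alarm_set(100000, rmv_port_callback,
			(void *)(intptr_t)pid) != 0)
		fprintf(stderr, "cannot schedule deferred removal\n");
}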
2546 /* This function is used by the interrupt thread */
2548 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2551 RTE_SET_USED(param);
2552 RTE_SET_USED(ret_param);
2554 if (type >= RTE_ETH_EVENT_MAX) {
2555 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2556 port_id, __func__, type);
2558 } else if (event_print_mask & (UINT32_C(1) << type)) {
2559 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2560 eth_event_desc[type]);
2565 case RTE_ETH_EVENT_NEW:
2566 ports[port_id].need_setup = 1;
2567 ports[port_id].port_status = RTE_PORT_HANDLING;
2569 case RTE_ETH_EVENT_INTR_RMV:
2570 if (port_id_is_invalid(port_id, DISABLED_WARN))
2572 if (rte_eal_alarm_set(100000,
2573 rmv_port_callback, (void *)(intptr_t)port_id))
2574 fprintf(stderr, "Could not set up deferred device removal\n");
2583 register_eth_event_callback(void)
2586 enum rte_eth_event_type event;
2588 for (event = RTE_ETH_EVENT_UNKNOWN;
2589 event < RTE_ETH_EVENT_MAX; event++) {
2590 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
2595 TESTPMD_LOG(ERR, "Failed to register callback for "
2596 "%s event\n", eth_event_desc[event]);
2604 /* This function is used by the interrupt thread */
2606 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2607 __rte_unused void *arg)
2612 if (type >= RTE_DEV_EVENT_MAX) {
2613 fprintf(stderr, "%s called upon invalid event %d\n",
2619 case RTE_DEV_EVENT_REMOVE:
2620 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
2622 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2624 RTE_LOG(ERR, EAL, "cannot get port for device %s!\n",
2629 * Because the user's callback is invoked from the EAL interrupt
2630 * callback, the interrupt callback must return before it can be
2631 * unregistered when detaching the device. So finish the callback
2632 * quickly and detach the device through a deferred removal instead.
2633 * This is a workaround; once device detaching is moved into the
2634 * EAL, the deferred removal can be dropped.
2637 if (rte_eal_alarm_set(100000,
2638 rmv_port_callback, (void *)(intptr_t)port_id))
2640 "Could not set up deferred device removal\n");
2642 case RTE_DEV_EVENT_ADD:
2643 RTE_LOG(DEBUG, EAL, "The device: %s has been added!\n",
2645 /* TODO: after kernel driver binding finishes,
2646 * begin to attach the port.
2655 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2659 uint8_t mapping_found = 0;
2661 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2662 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2663 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2664 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2665 tx_queue_stats_mappings[i].queue_id,
2666 tx_queue_stats_mappings[i].stats_counter_id);
2673 port->tx_queue_stats_mapping_enabled = 1;
2678 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2682 uint8_t mapping_found = 0;
2684 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2685 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2686 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2687 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2688 rx_queue_stats_mappings[i].queue_id,
2689 rx_queue_stats_mappings[i].stats_counter_id);
2696 port->rx_queue_stats_mapping_enabled = 1;
2701 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2705 diag = set_tx_queue_stats_mapping_registers(pi, port);
2707 if (diag == -ENOTSUP) {
2708 port->tx_queue_stats_mapping_enabled = 0;
2709 printf("TX queue stats mapping not supported for port id=%d\n", pi);
2712 rte_exit(EXIT_FAILURE,
2713 "set_tx_queue_stats_mapping_registers "
2714 "failed for port id=%d diag=%d\n",
2718 diag = set_rx_queue_stats_mapping_registers(pi, port);
2720 if (diag == -ENOTSUP) {
2721 port->rx_queue_stats_mapping_enabled = 0;
2722 printf("RX queue stats mapping not supported for port id=%d\n", pi);
2725 rte_exit(EXIT_FAILURE,
2726 "set_rx_queue_stats_mapping_registers "
2727 "failed for port id=%d diag=%d\n",
2733 rxtx_port_config(struct rte_port *port)
2737 for (qid = 0; qid < nb_rxq; qid++) {
2738 port->rx_conf[qid] = port->dev_info.default_rxconf;
2740 /* Check if any Rx parameters have been passed */
2741 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2742 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2744 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2745 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2747 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2748 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2750 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2751 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2753 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2754 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2756 port->nb_rx_desc[qid] = nb_rxd;
2759 for (qid = 0; qid < nb_txq; qid++) {
2760 port->tx_conf[qid] = port->dev_info.default_txconf;
2762 /* Check if any Tx parameters have been passed */
2763 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2764 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2766 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2767 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2769 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2770 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2772 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2773 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2775 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2776 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2778 port->nb_tx_desc[qid] = nb_txd;
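/*
 * Sketch of how a per-queue configuration like the one filled in above is
 * eventually consumed: start from the PMD defaults in dev_info, apply an
 * override, and hand the result to the queue setup call. "mp" stands for
 * any previously created mbuf pool; the threshold value is only an example.
 */
static int
example_setup_rxq0(portid_t pid, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;

	rte_eth_dev_info_get(pid, &dev_info);
	rxq_conf = dev_info.default_rxconf;	/* PMD defaults */
	rxq_conf.rx_free_thresh = 32;		/* example override */
	return rte_eth_rx_queue_setup(pid, 0, nb_rxd,
			rte_eth_dev_socket_id(pid), &rxq_conf, mp);
}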
2783 init_port_config(void)
2786 struct rte_port *port;
2788 RTE_ETH_FOREACH_DEV(pid) {
2790 port->dev_conf.fdir_conf = fdir_conf;
2791 rte_eth_dev_info_get(pid, &port->dev_info);
2793 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2794 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2795 rss_hf & port->dev_info.flow_type_rss_offloads;
2797 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2798 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2801 if (port->dcb_flag == 0) {
2802 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2803 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2805 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2808 rxtx_port_config(port);
2810 rte_eth_macaddr_get(pid, &port->eth_addr);
2812 map_port_queue_stats_mapping_registers(pid, port);
2813 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2814 rte_pmd_ixgbe_bypass_init(pid);
2817 if (lsc_interrupt &&
2818 (rte_eth_devices[pid].data->dev_flags &
2819 RTE_ETH_DEV_INTR_LSC))
2820 port->dev_conf.intr_conf.lsc = 1;
2821 if (rmv_interrupt &&
2822 (rte_eth_devices[pid].data->dev_flags &
2823 RTE_ETH_DEV_INTR_RMV))
2824 port->dev_conf.intr_conf.rmv = 1;
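/*
 * Reduced sketch of the RSS decision made above: advertise only the hash
 * types the device actually supports, then pick the Rx multi-queue mode
 * from whether any hash type is left.
 */
static void
example_pick_mq_mode(struct rte_eth_conf *conf,
		const struct rte_eth_dev_info *dev_info)
{
	conf->rx_adv_conf.rss_conf.rss_key = NULL;
	conf->rx_adv_conf.rss_conf.rss_hf =
		ETH_RSS_IP & dev_info->flow_type_rss_offloads;
	conf->rxmode.mq_mode = conf->rx_adv_conf.rss_conf.rss_hf != 0 ?
		ETH_MQ_RX_RSS : ETH_MQ_RX_NONE;
}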
2828 void set_port_slave_flag(portid_t slave_pid)
2830 struct rte_port *port;
2832 port = &ports[slave_pid];
2833 port->slave_flag = 1;
2836 void clear_port_slave_flag(portid_t slave_pid)
2838 struct rte_port *port;
2840 port = &ports[slave_pid];
2841 port->slave_flag = 0;
2844 uint8_t port_is_bonding_slave(portid_t slave_pid)
2846 struct rte_port *port;
2848 port = &ports[slave_pid];
2849 if ((rte_eth_devices[slave_pid].data->dev_flags &
2850 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2855 const uint16_t vlan_tags[] = {
2856 0, 1, 2, 3, 4, 5, 6, 7,
2857 8, 9, 10, 11, 12, 13, 14, 15,
2858 16, 17, 18, 19, 20, 21, 22, 23,
2859 24, 25, 26, 27, 28, 29, 30, 31
2863 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2864 enum dcb_mode_enable dcb_mode,
2865 enum rte_eth_nb_tcs num_tcs,
2870 struct rte_eth_rss_conf rss_conf;
2873 * Builds up the correct configuration for DCB+VT, based on the VLAN tags
2874 * array given above and the number of traffic classes available for use.
2876 if (dcb_mode == DCB_VT_ENABLED) {
2877 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2878 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2879 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2880 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2882 /* VMDQ+DCB RX and TX configurations */
2883 vmdq_rx_conf->enable_default_pool = 0;
2884 vmdq_rx_conf->default_pool = 0;
2885 vmdq_rx_conf->nb_queue_pools =
2886 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2887 vmdq_tx_conf->nb_queue_pools =
2888 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2890 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2891 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2892 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2893 vmdq_rx_conf->pool_map[i].pools =
2894 1 << (i % vmdq_rx_conf->nb_queue_pools);
2896 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2897 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2898 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2901 /* set DCB mode of RX and TX of multiple queues */
2902 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2903 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2905 struct rte_eth_dcb_rx_conf *rx_conf =
2906 &eth_conf->rx_adv_conf.dcb_rx_conf;
2907 struct rte_eth_dcb_tx_conf *tx_conf =
2908 &eth_conf->tx_adv_conf.dcb_tx_conf;
2910 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2914 rx_conf->nb_tcs = num_tcs;
2915 tx_conf->nb_tcs = num_tcs;
2917 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2918 rx_conf->dcb_tc[i] = i % num_tcs;
2919 tx_conf->dcb_tc[i] = i % num_tcs;
2922 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2923 eth_conf->rx_adv_conf.rss_conf = rss_conf;
2924 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2928 eth_conf->dcb_capability_en =
2929 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2931 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
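/*
 * The "i % num_tcs" mapping above spreads the eight user priorities
 * round-robin over the enabled traffic classes; with num_tcs == 4,
 * priorities 0..7 land on TCs 0,1,2,3,0,1,2,3. As a worked helper:
 */
static uint8_t
example_prio_to_tc(uint8_t prio, uint8_t num_tcs)
{
	return prio % num_tcs;	/* e.g. prio 6 with 4 TCs -> TC 2 */
}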
2937 init_port_dcb_config(portid_t pid,
2938 enum dcb_mode_enable dcb_mode,
2939 enum rte_eth_nb_tcs num_tcs,
2942 struct rte_eth_conf port_conf;
2943 struct rte_port *rte_port;
2947 rte_port = &ports[pid];
2949 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2950 /* Enter DCB configuration status */
2953 port_conf.rxmode = rte_port->dev_conf.rxmode;
2954 port_conf.txmode = rte_port->dev_conf.txmode;
2956 /* set configuration of DCB in VT mode and DCB in non-VT mode */
2957 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2960 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2962 /* re-configure the device */
2963 rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
2965 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2967 /* If dev_info.vmdq_pool_base is greater than 0,
2968 * the queue IDs of the VMDq pools start after the PF queues.
2970 if (dcb_mode == DCB_VT_ENABLED &&
2971 rte_port->dev_info.vmdq_pool_base > 0) {
2972 printf("VMDQ_DCB multi-queue mode is nonsensical"
2973 " for port %d.\n", pid);
2977 /* Assume the ports in testpmd have the same DCB capability
2978 * and the same number of rxq and txq in DCB mode
2980 if (dcb_mode == DCB_VT_ENABLED) {
2981 if (rte_port->dev_info.max_vfs > 0) {
2982 nb_rxq = rte_port->dev_info.nb_rx_queues;
2983 nb_txq = rte_port->dev_info.nb_tx_queues;
2985 nb_rxq = rte_port->dev_info.max_rx_queues;
2986 nb_txq = rte_port->dev_info.max_tx_queues;
2989 /* if VT is disabled, use all PF queues */
2990 if (rte_port->dev_info.vmdq_pool_base == 0) {
2991 nb_rxq = rte_port->dev_info.max_rx_queues;
2992 nb_txq = rte_port->dev_info.max_tx_queues;
2994 nb_rxq = (queueid_t)num_tcs;
2995 nb_txq = (queueid_t)num_tcs;
2999 rx_free_thresh = 64;
3001 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3003 rxtx_port_config(rte_port);
3005 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3006 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3007 rx_vft_set(pid, vlan_tags[i], 1);
3009 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
3010 map_port_queue_stats_mapping_registers(pid, rte_port);
3012 rte_port->dcb_flag = 1;
3020 /* Configuration of Ethernet ports. */
3021 ports = rte_zmalloc("testpmd: ports",
3022 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3023 RTE_CACHE_LINE_SIZE);
3024 if (ports == NULL) {
3025 rte_exit(EXIT_FAILURE,
3026 "rte_zmalloc(%d struct rte_port) failed\n",
3030 /* Initialize ports NUMA structures */
3031 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3032 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3033 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
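/*
 * Sketch of how these NUMA hint arrays are typically consumed later
 * (an assumption based on testpmd's port start-up logic, which is outside
 * this excerpt): honor an explicit per-port socket if one was configured,
 * otherwise fall back to the socket the port itself resides on.
 */
static unsigned int
example_rxring_socket(portid_t pid)
{
	if (rxring_numa[pid] != NUMA_NO_CONFIG)
		return rxring_numa[pid];
	return (unsigned int)rte_eth_dev_socket_id(pid);
}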
3047 const char clr[] = { 27, '[', '2', 'J', '\0' };
3048 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3050 /* Clear screen and move to top left */
3051 printf("%s%s", clr, top_left);
3053 printf("\nPort statistics ====================================");
3054 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3055 nic_stats_display(fwd_ports_ids[i]);
3061 signal_handler(int signum)
3063 if (signum == SIGINT || signum == SIGTERM) {
3064 printf("\nSignal %d received, preparing to exit...\n",
3066 #ifdef RTE_LIBRTE_PDUMP
3067 /* uninitialize packet capture framework */
3070 #ifdef RTE_LIBRTE_LATENCY_STATS
3071 rte_latencystats_uninit();
3074 /* Set flag to indicate forced termination. */
3076 /* exit with the expected status */
3077 signal(signum, SIG_DFL);
3078 kill(getpid(), signum);
3083 main(int argc, char** argv)
3090 signal(SIGINT, signal_handler);
3091 signal(SIGTERM, signal_handler);
3093 diag = rte_eal_init(argc, argv);
3095 rte_panic("Cannot init EAL\n");
3097 testpmd_logtype = rte_log_register("testpmd");
3098 if (testpmd_logtype < 0)
3099 rte_panic("Cannot register log type");
3100 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3102 ret = register_eth_event_callback();
3104 rte_panic("Cannot register for ethdev events");
3106 #ifdef RTE_LIBRTE_PDUMP
3107 /* initialize packet capture framework */
3112 RTE_ETH_FOREACH_DEV(port_id) {
3113 ports_ids[count] = port_id;
3116 nb_ports = (portid_t) count;
3118 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3120 /* allocate port structures, and init them */
3123 set_def_fwd_config();
3125 rte_panic("Empty set of forwarding logical cores - check the "
3126 "core mask supplied in the command parameters\n");
3128 /* Bitrate/latency stats disabled by default */
3129 #ifdef RTE_LIBRTE_BITRATE
3130 bitrate_enabled = 0;
3132 #ifdef RTE_LIBRTE_LATENCY_STATS
3133 latencystats_enabled = 0;
3136 /* on FreeBSD, mlockall() is disabled by default */
3137 #ifdef RTE_EXEC_ENV_FREEBSD
3146 launch_args_parse(argc, argv);
3148 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3149 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3153 if (tx_first && interactive)
3154 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3155 "interactive mode.\n");
3157 if (tx_first && lsc_interrupt) {
3158 printf("Warning: lsc_interrupt needs to be off when "
3159 "using tx_first. Disabling.\n");
3163 if (!nb_rxq && !nb_txq)
3164 printf("Warning: either RX or TX queue count should be non-zero\n");
3166 if (nb_rxq > 1 && nb_rxq > nb_txq)
3167 printf("Warning: nb_rxq=%d enables RSS configuration, "
3168 "but nb_txq=%d will prevent it from being fully tested.\n",
3174 ret = rte_dev_hotplug_handle_enable();
3177 "fail to enable hotplug handling.");
3181 ret = rte_dev_event_monitor_start();
3184 "fail to start device event monitoring.");
3188 ret = rte_dev_event_callback_register(NULL,
3189 dev_event_callback, NULL);
3192 "fail to register device event callback\n");
3197 if (start_port(RTE_PORT_ALL) != 0)
3198 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3200 /* set all ports to promiscuous mode by default */
3201 RTE_ETH_FOREACH_DEV(port_id)
3202 rte_eth_promiscuous_enable(port_id);
3204 /* Init metrics library */
3205 rte_metrics_init(rte_socket_id());
3207 #ifdef RTE_LIBRTE_LATENCY_STATS
3208 if (latencystats_enabled != 0) {
3209 int ret = rte_latencystats_init(1, NULL);
3211 printf("Warning: latencystats init()"
3212 " returned error %d\n", ret);
3213 printf("Latencystats running on lcore %d\n",
3214 latencystats_lcore_id);
3218 /* Setup bitrate stats */
3219 #ifdef RTE_LIBRTE_BITRATE
3220 if (bitrate_enabled != 0) {
3221 bitrate_data = rte_stats_bitrate_create();
3222 if (bitrate_data == NULL)
3223 rte_exit(EXIT_FAILURE,
3224 "Could not allocate bitrate data.\n");
3225 rte_stats_bitrate_reg(bitrate_data);
3229 #ifdef RTE_LIBRTE_CMDLINE
3230 if (strlen(cmdline_filename) != 0)
3231 cmdline_read_from_file(cmdline_filename);
3233 if (interactive == 1) {
3235 printf("Start automatic packet forwarding\n");
3236 start_packet_forwarding(0);
3248 printf("No command-line core given, start packet forwarding\n");
3249 start_packet_forwarding(tx_first);
3250 if (stats_period != 0) {
3251 uint64_t prev_time = 0, cur_time, diff_time = 0;
3252 uint64_t timer_period;
3254 /* Convert to number of cycles */
3255 timer_period = stats_period * rte_get_timer_hz();
3257 while (f_quit == 0) {
3258 cur_time = rte_get_timer_cycles();
3259 diff_time += cur_time - prev_time;
3261 if (diff_time >= timer_period) {
3263 /* Reset the timer */
3266 /* Sleep to avoid unnecessary checks */
3267 prev_time = cur_time;
3272 printf("Press enter to exit\n");
3273 rc = read(0, &c, 1);
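	/*
	 * Note on the cycle arithmetic in the stats loop above:
	 * rte_get_timer_hz() returns the timer frequency in cycles per
	 * second, so a period in seconds maps to cycles as, for instance:
	 *
	 *	uint64_t period = stats_period * rte_get_timer_hz();
	 *	uint64_t start = rte_get_timer_cycles();
	 *	while (rte_get_timer_cycles() - start < period)
	 *		;	// one stats interval elapses here
	 *
	 * This is only an illustrative reduction of the loop above.
	 */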