1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_memory.h>
31 #include <rte_memcpy.h>
32 #include <rte_launch.h>
34 #include <rte_alarm.h>
35 #include <rte_per_lcore.h>
36 #include <rte_lcore.h>
37 #include <rte_atomic.h>
38 #include <rte_branch_prediction.h>
39 #include <rte_mempool.h>
40 #include <rte_malloc.h>
42 #include <rte_mbuf_pool_ops.h>
43 #include <rte_interrupts.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
48 #include <rte_string_fns.h>
49 #ifdef RTE_LIBRTE_IXGBE_PMD
50 #include <rte_pmd_ixgbe.h>
52 #ifdef RTE_LIBRTE_PDUMP
53 #include <rte_pdump.h>
56 #include <rte_metrics.h>
57 #ifdef RTE_LIBRTE_BITRATE
58 #include <rte_bitrate.h>
60 #ifdef RTE_LIBRTE_LATENCY_STATS
61 #include <rte_latencystats.h>
67 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
68 #define HUGE_FLAG (0x40000)
70 #define HUGE_FLAG MAP_HUGETLB
73 #ifndef MAP_HUGE_SHIFT
74 /* older kernels (or FreeBSD) will not have this define */
75 #define HUGE_SHIFT (26)
77 #define HUGE_SHIFT MAP_HUGE_SHIFT
80 #define EXTMEM_HEAP_NAME "extmem"
82 uint16_t verbose_level = 0; /**< Silent by default. */
83 int testpmd_logtype; /**< Log type for testpmd logs */
85 /* use master core for command line? */
86 uint8_t interactive = 0;
87 uint8_t auto_start = 0;
89 char cmdline_filename[PATH_MAX] = {0};
92 * NUMA support configuration.
93 * When set, the NUMA support attempts to dispatch the allocation of the
94 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
95 * probed ports among the CPU sockets 0 and 1.
96 * Otherwise, all memory is allocated from CPU socket 0.
98 uint8_t numa_support = 1; /**< numa enabled by default */
101 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
104 uint8_t socket_num = UMA_NO_CONFIG;
107 * Select mempool allocation type:
108 * - native: use regular DPDK memory
109 * - anon: use regular DPDK memory to create mempool, but populate using
110 * anonymous memory (may not be IOVA-contiguous)
111 * - xmem: use externally allocated hugepage memory
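 * Note (editorial assumption, based on testpmd documentation): the
 * allocation type is normally selected at startup with the --mp-alloc
 * command-line option, e.g. --mp-alloc=xmem.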
113 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
116 * Store the specified sockets on which the memory pools used by the ports are allocated.
119 uint8_t port_numa[RTE_MAX_ETHPORTS];
122 * Store the specified sockets on which the RX rings used by the ports are allocated.
125 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
128 * Store the specified sockets on which the TX rings used by the ports are allocated.
131 uint8_t txring_numa[RTE_MAX_ETHPORTS];
134 * Record the Ethernet addresses of the peer target ports to which packets are sent.
136 * Must be instantiated with the Ethernet addresses of the peer traffic generator ports.
139 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
140 portid_t nb_peer_eth_addrs = 0;
143 * Probed Target Environment.
145 struct rte_port *ports; /**< For all probed ethernet ports. */
146 portid_t nb_ports; /**< Number of probed ethernet ports. */
147 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
148 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
150 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
153 * Test Forwarding Configuration.
154 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
155 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
157 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
158 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
159 portid_t nb_cfg_ports; /**< Number of configured ports. */
160 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
162 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
163 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
165 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
166 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
169 * Forwarding engines.
171 struct fwd_engine * fwd_engines[] = {
181 #if defined RTE_LIBRTE_PMD_SOFTNIC
184 #ifdef RTE_LIBRTE_IEEE1588
185 &ieee1588_fwd_engine,
190 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
191 uint16_t mempool_flags;
193 struct fwd_config cur_fwd_config;
194 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
195 uint32_t retry_enabled;
196 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
197 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
199 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
200 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
201 * specified on command-line. */
202 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
205 * In a container, the process running with the 'stats-period' option cannot be
206 * terminated normally. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
211 * Configuration of packet segments used by the "txonly" processing engine.
213 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
214 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
215 TXONLY_DEF_PACKET_LEN,
217 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
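/*
 * Illustrative example (option name assumed from testpmd docs): running
 * with --txpkts=64,64 would set tx_pkt_seg_lengths to {64, 64} and
 * tx_pkt_nb_segs to 2, producing 128-byte packets made of two segments.
 */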
219 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
220 /**< Split policy for packets to TX. */
222 uint8_t txonly_multi_flow;
223 /**< Whether multiple flows are generated in TXONLY mode. */
225 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
226 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
228 /* Whether the current configuration is in DCB mode; 0 means it is not. */
229 uint8_t dcb_config = 0;
231 /* Whether DCB is in testing status */
232 uint8_t dcb_test = 0;
235 * Configurable number of RX/TX queues.
237 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
238 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
239 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
242 * Configurable number of RX/TX ring descriptors.
243 * Defaults are supplied by drivers via ethdev.
245 #define RTE_TEST_RX_DESC_DEFAULT 0
246 #define RTE_TEST_TX_DESC_DEFAULT 0
247 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
248 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
250 #define RTE_PMD_PARAM_UNSET -1
252 * Configurable values of RX and TX ring threshold registers.
255 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
256 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
257 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
259 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
260 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
261 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
264 * Configurable value of RX free threshold.
266 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
269 * Configurable value of RX drop enable.
271 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
274 * Configurable value of TX free threshold.
276 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
279 * Configurable value of TX RS bit threshold.
281 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
284 * Configurable number of packets to buffer before sending.
286 uint16_t noisy_tx_sw_bufsz;
289 * Configurable value of packet buffer timeout.
291 uint16_t noisy_tx_sw_buf_flush_time;
294 * Configurable value for size of VNF internal memory area
295 * used for simulating noisy neighbour behaviour
297 uint64_t noisy_lkup_mem_sz;
300 * Configurable value of number of random writes done in
301 * VNF simulation memory area.
303 uint64_t noisy_lkup_num_writes;
306 * Configurable value of number of random reads done in
307 * VNF simulation memory area.
309 uint64_t noisy_lkup_num_reads;
312 * Configurable value of number of random reads/writes done in
313 * VNF simulation memory area.
315 uint64_t noisy_lkup_num_reads_writes;
318 * Receive Side Scaling (RSS) configuration.
320 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
323 * Port topology configuration
325 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
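/*
 * Illustrative note (assumption based on testpmd docs): in the default
 * paired topology, port 0 forwards to port 1 and vice versa; in chained
 * topology each port forwards to the next port in the list.
 */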
328 * Avoid flushing all the RX streams before starting forwarding.
330 uint8_t no_flush_rx = 0; /* flush by default */
333 * Flow API isolated mode.
335 uint8_t flow_isolate_all;
338 * Avoid checking the link status when starting/stopping a port.
340 uint8_t no_link_check = 0; /* check by default */
343 * Don't automatically start all ports in interactive mode.
345 uint8_t no_device_start = 0;
348 * Enable link status change notification
350 uint8_t lsc_interrupt = 1; /* enabled by default */
353 * Enable device removal notification.
355 uint8_t rmv_interrupt = 1; /* enabled by default */
357 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
359 /* After attach, port setup is called on event or by iterator */
360 bool setup_on_probe_event = true;
362 /* Clear ptypes on port initialization. */
363 uint8_t clear_ptypes = true;
365 /* Pretty printing of ethdev events */
366 static const char * const eth_event_desc[] = {
367 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
368 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
369 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
370 [RTE_ETH_EVENT_INTR_RESET] = "reset",
371 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
372 [RTE_ETH_EVENT_IPSEC] = "IPsec",
373 [RTE_ETH_EVENT_MACSEC] = "MACsec",
374 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
375 [RTE_ETH_EVENT_NEW] = "device probed",
376 [RTE_ETH_EVENT_DESTROY] = "device released",
377 [RTE_ETH_EVENT_MAX] = NULL,
381 * Display or mask Ethernet device events.
382 * Defaults to all events except VF_MBOX.
384 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
385 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
386 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
387 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
388 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
389 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
390 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
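/*
 * Illustrative example: to also print VF mailbox events, the mask above
 * could additionally OR in (UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX).
 */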
392 * Decide if all memory is locked for performance.
397 * NIC bypass mode configuration options.
400 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
401 /* The NIC bypass watchdog timeout. */
402 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
406 #ifdef RTE_LIBRTE_LATENCY_STATS
409 * Set when latency stats are enabled on the command line.
411 uint8_t latencystats_enabled;
414 * Lcore ID to serve latency statistics.
416 lcoreid_t latencystats_lcore_id = -1;
421 * Ethernet device configuration.
423 struct rte_eth_rxmode rx_mode = {
424 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
425 /**< Default maximum frame length. */
428 struct rte_eth_txmode tx_mode = {
429 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
432 struct rte_fdir_conf fdir_conf = {
433 .mode = RTE_FDIR_MODE_NONE,
434 .pballoc = RTE_FDIR_PBALLOC_64K,
435 .status = RTE_FDIR_REPORT_STATUS,
437 .vlan_tci_mask = 0xFFEF,
439 .src_ip = 0xFFFFFFFF,
440 .dst_ip = 0xFFFFFFFF,
443 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
444 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
446 .src_port_mask = 0xFFFF,
447 .dst_port_mask = 0xFFFF,
448 .mac_addr_byte_mask = 0xFF,
449 .tunnel_type_mask = 1,
450 .tunnel_id_mask = 0xFFFFFFFF,
455 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
457 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
458 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
460 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
461 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
463 uint16_t nb_tx_queue_stats_mappings = 0;
464 uint16_t nb_rx_queue_stats_mappings = 0;
467 * Display zero values by default for xstats
469 uint8_t xstats_hide_zero;
471 unsigned int num_sockets = 0;
472 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
474 #ifdef RTE_LIBRTE_BITRATE
475 /* Bitrate statistics */
476 struct rte_stats_bitrates *bitrate_data;
477 lcoreid_t bitrate_lcore_id;
478 uint8_t bitrate_enabled;
481 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
482 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
484 /* Forward function declarations */
485 static void setup_attached_port(portid_t pi);
486 static void map_port_queue_stats_mapping_registers(portid_t pi,
487 struct rte_port *port);
488 static void check_all_ports_link_status(uint32_t port_mask);
489 static int eth_event_callback(portid_t port_id,
490 enum rte_eth_event_type type,
491 void *param, void *ret_param);
492 static void dev_event_callback(const char *device_name,
493 enum rte_dev_event_type type,
497 * Check if all the ports are started.
498 * If yes, return positive value. If not, return zero.
500 static int all_ports_started(void);
502 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
503 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
505 /* Holds the registered mbuf dynamic flag names. */
506 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
509 * Helper function to check whether a socket has already been discovered.
510 * Returns zero if it has, a positive value if the socket is new.
513 new_socket_id(unsigned int socket_id)
517 for (i = 0; i < num_sockets; i++) {
518 if (socket_ids[i] == socket_id)
525 * Set up the default configuration.
528 set_default_fwd_lcores_config(void)
532 unsigned int sock_num;
535 for (i = 0; i < RTE_MAX_LCORE; i++) {
536 if (!rte_lcore_is_enabled(i))
538 sock_num = rte_lcore_to_socket_id(i);
539 if (new_socket_id(sock_num)) {
540 if (num_sockets >= RTE_MAX_NUMA_NODES) {
541 rte_exit(EXIT_FAILURE,
542 "Total sockets greater than %u\n",
545 socket_ids[num_sockets++] = sock_num;
547 if (i == rte_get_master_lcore())
549 fwd_lcores_cpuids[nb_lc++] = i;
551 nb_lcores = (lcoreid_t) nb_lc;
552 nb_cfg_lcores = nb_lcores;
557 set_def_peer_eth_addrs(void)
561 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
562 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
563 peer_eth_addrs[i].addr_bytes[5] = i;
568 set_default_fwd_ports_config(void)
573 RTE_ETH_FOREACH_DEV(pt_id) {
574 fwd_ports_ids[i++] = pt_id;
576 /* Update sockets info according to the attached device */
577 int socket_id = rte_eth_dev_socket_id(pt_id);
578 if (socket_id >= 0 && new_socket_id(socket_id)) {
579 if (num_sockets >= RTE_MAX_NUMA_NODES) {
580 rte_exit(EXIT_FAILURE,
581 "Total sockets greater than %u\n",
584 socket_ids[num_sockets++] = socket_id;
588 nb_cfg_ports = nb_ports;
589 nb_fwd_ports = nb_ports;
593 set_def_fwd_config(void)
595 set_default_fwd_lcores_config();
596 set_def_peer_eth_addrs();
597 set_default_fwd_ports_config();
600 /* extremely pessimistic estimation of memory required to create a mempool */
602 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
604 unsigned int n_pages, mbuf_per_pg, leftover;
605 uint64_t total_mem, mbuf_mem, obj_sz;
607 /* there is no good way to predict how much space the mempool will
608 * occupy because it will allocate chunks on the fly, and some of those
609 * will come from default DPDK memory while some will come from our
610 * external memory, so just assume 128MB will be enough for everyone.
612 uint64_t hdr_mem = 128 << 20;
614 /* account for possible non-contiguousness */
615 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
617 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
621 mbuf_per_pg = pgsz / obj_sz;
622 leftover = (nb_mbufs % mbuf_per_pg) > 0;
623 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
625 mbuf_mem = n_pages * pgsz;
627 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
629 if (total_mem > SIZE_MAX) {
630 TESTPMD_LOG(ERR, "Memory size too big\n");
633 *out = (size_t)total_mem;
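/*
 * Worked example (illustrative numbers only): with nb_mbufs = 4096, an
 * object size around 2.5 KB and 2 MB pages, roughly 800 mbufs fit per
 * page, so six pages (~12 MB) hold the objects; adding the assumed
 * 128 MB header budget and rounding up to the page size gives *out.
 */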
639 pagesz_flags(uint64_t page_sz)
641 /* as per the mmap() manpage, huge page sizes are encoded as the log2 of
642 * the page size shifted left by MAP_HUGE_SHIFT
644 int log2 = rte_log2_u64(page_sz);
646 return (log2 << HUGE_SHIFT);
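/*
 * Example (illustrative): for 2 MB pages rte_log2_u64() returns 21, so
 * mmap() receives (21 << MAP_HUGE_SHIFT), i.e. the MAP_HUGE_2MB value.
 */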
650 alloc_mem(size_t memsz, size_t pgsz, bool huge)
655 /* allocate anonymous hugepages */
656 flags = MAP_ANONYMOUS | MAP_PRIVATE;
658 flags |= HUGE_FLAG | pagesz_flags(pgsz);
660 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
661 if (addr == MAP_FAILED)
667 struct extmem_param {
671 rte_iova_t *iova_table;
672 unsigned int iova_table_len;
676 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
679 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
680 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
681 unsigned int cur_page, n_pages, pgsz_idx;
682 size_t mem_sz, cur_pgsz;
683 rte_iova_t *iovas = NULL;
687 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
688 /* skip anything that is too big */
689 if (pgsizes[pgsz_idx] > SIZE_MAX)
692 cur_pgsz = pgsizes[pgsz_idx];
694 /* if we were told not to allocate hugepages, override */
696 cur_pgsz = sysconf(_SC_PAGESIZE);
698 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
700 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
704 /* allocate our memory */
705 addr = alloc_mem(mem_sz, cur_pgsz, huge);
707 /* if we couldn't allocate memory with a specified page size,
708 * that doesn't mean we can't do it with other page sizes, so try the next one
714 /* store IOVA addresses for every page in this memory area */
715 n_pages = mem_sz / cur_pgsz;
717 iovas = malloc(sizeof(*iovas) * n_pages);
720 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
723 /* lock memory if it's not huge pages */
727 /* populate IOVA addresses */
728 for (cur_page = 0; cur_page < n_pages; cur_page++) {
733 offset = cur_pgsz * cur_page;
734 cur = RTE_PTR_ADD(addr, offset);
736 /* touch the page before getting its IOVA */
737 *(volatile char *)cur = 0;
739 iova = rte_mem_virt2iova(cur);
741 iovas[cur_page] = iova;
746 /* if we couldn't allocate anything */
752 param->pgsz = cur_pgsz;
753 param->iova_table = iovas;
754 param->iova_table_len = n_pages;
761 munmap(addr, mem_sz);
767 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
769 struct extmem_param param;
772 memset(&param, 0, sizeof(param));
774 /* check if our heap exists */
775 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
777 /* create our heap */
778 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
780 TESTPMD_LOG(ERR, "Cannot create heap\n");
785 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
787 TESTPMD_LOG(ERR, "Cannot create memory area\n");
791 /* we now have a valid memory area, so add it to heap */
792 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
793 param.addr, param.len, param.iova_table,
794 param.iova_table_len, param.pgsz);
796 /* when using VFIO, memory is automatically mapped for DMA by EAL */
798 /* not needed any more */
799 free(param.iova_table);
802 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
803 munmap(param.addr, param.len);
809 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
815 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
816 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
821 RTE_ETH_FOREACH_DEV(pid) {
822 struct rte_eth_dev *dev =
823 &rte_eth_devices[pid];
825 ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
829 "unable to DMA unmap addr 0x%p "
831 memhdr->addr, dev->data->name);
834 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
837 "unable to un-register addr 0x%p\n", memhdr->addr);
842 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
843 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
846 size_t page_size = sysconf(_SC_PAGESIZE);
849 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
853 "unable to register addr 0x%p\n", memhdr->addr);
856 RTE_ETH_FOREACH_DEV(pid) {
857 struct rte_eth_dev *dev =
858 &rte_eth_devices[pid];
860 ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
864 "unable to DMA map addr 0x%p "
866 memhdr->addr, dev->data->name);
872 * Configuration initialisation done once at init time.
874 static struct rte_mempool *
875 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
876 unsigned int socket_id)
878 char pool_name[RTE_MEMPOOL_NAMESIZE];
879 struct rte_mempool *rte_mp = NULL;
882 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
883 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
886 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
887 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
889 switch (mp_alloc_type) {
890 case MP_ALLOC_NATIVE:
892 /* wrapper to rte_mempool_create() */
893 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
894 rte_mbuf_best_mempool_ops());
895 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
896 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
901 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
902 mb_size, (unsigned int) mb_mempool_cache,
903 sizeof(struct rte_pktmbuf_pool_private),
904 socket_id, mempool_flags);
908 if (rte_mempool_populate_anon(rte_mp) == 0) {
909 rte_mempool_free(rte_mp);
913 rte_pktmbuf_pool_init(rte_mp, NULL);
914 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
915 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
919 case MP_ALLOC_XMEM_HUGE:
922 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
924 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
925 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
928 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
930 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
932 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
933 rte_mbuf_best_mempool_ops());
934 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
935 mb_mempool_cache, 0, mbuf_seg_size,
941 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
946 if (rte_mp == NULL) {
947 rte_exit(EXIT_FAILURE,
948 "Creation of mbuf pool for socket %u failed: %s\n",
949 socket_id, rte_strerror(rte_errno));
950 } else if (verbose_level > 0) {
951 rte_mempool_dump(stdout, rte_mp);
957 * Check whether the given socket ID is valid in NUMA mode;
958 * if valid, return 0, else return -1
961 check_socket_id(const unsigned int socket_id)
963 static int warning_once = 0;
965 if (new_socket_id(socket_id)) {
966 if (!warning_once && numa_support)
967 printf("Warning: NUMA should be configured manually by"
968 " using --port-numa-config and"
969 " --ring-numa-config parameters along with"
978 * Get the allowed maximum number of RX queues.
979 * *pid returns the port ID that has the minimal value of
980 * max_rx_queues among all ports.
983 get_allowed_max_nb_rxq(portid_t *pid)
985 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
986 bool max_rxq_valid = false;
988 struct rte_eth_dev_info dev_info;
990 RTE_ETH_FOREACH_DEV(pi) {
991 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
994 max_rxq_valid = true;
995 if (dev_info.max_rx_queues < allowed_max_rxq) {
996 allowed_max_rxq = dev_info.max_rx_queues;
1000 return max_rxq_valid ? allowed_max_rxq : 0;
1004 * Check whether the input rxq is valid.
1005 * It is valid if it does not exceed the maximum number
1006 * of RX queues of any port.
1007 * If valid, return 0; otherwise return -1.
1010 check_nb_rxq(queueid_t rxq)
1012 queueid_t allowed_max_rxq;
1015 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1016 if (rxq > allowed_max_rxq) {
1017 printf("Fail: input rxq (%u) can't be greater "
1018 "than max_rx_queues (%u) of port %u\n",
1028 * Get the allowed maximum number of TX queues.
1029 * *pid returns the port ID that has the minimal value of
1030 * max_tx_queues among all ports.
1033 get_allowed_max_nb_txq(portid_t *pid)
1035 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1036 bool max_txq_valid = false;
1038 struct rte_eth_dev_info dev_info;
1040 RTE_ETH_FOREACH_DEV(pi) {
1041 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1044 max_txq_valid = true;
1045 if (dev_info.max_tx_queues < allowed_max_txq) {
1046 allowed_max_txq = dev_info.max_tx_queues;
1050 return max_txq_valid ? allowed_max_txq : 0;
1054 * Check whether the input txq is valid.
1055 * It is valid if it does not exceed the maximum number
1056 * of TX queues of any port.
1057 * If valid, return 0; otherwise return -1.
1060 check_nb_txq(queueid_t txq)
1062 queueid_t allowed_max_txq;
1065 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1066 if (txq > allowed_max_txq) {
1067 printf("Fail: input txq (%u) can't be greater "
1068 "than max_tx_queues (%u) of port %u\n",
1078 * Get the allowed maximum number of hairpin queues.
1079 * *pid returns the port ID that has the minimal value of
1080 * max_hairpin_queues among all ports.
1083 get_allowed_max_nb_hairpinq(portid_t *pid)
1085 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1087 struct rte_eth_hairpin_cap cap;
1089 RTE_ETH_FOREACH_DEV(pi) {
1090 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1094 if (cap.max_nb_queues < allowed_max_hairpinq) {
1095 allowed_max_hairpinq = cap.max_nb_queues;
1099 return allowed_max_hairpinq;
1103 * Check whether the input hairpin queue count is valid.
1104 * It is valid if it does not exceed the maximum number
1105 * of hairpin queues of any port.
1106 * If valid, return 0; otherwise return -1.
1109 check_nb_hairpinq(queueid_t hairpinq)
1111 queueid_t allowed_max_hairpinq;
1114 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1115 if (hairpinq > allowed_max_hairpinq) {
1116 printf("Fail: input hairpin (%u) can't be greater "
1117 "than max_hairpin_queues (%u) of port %u\n",
1118 hairpinq, allowed_max_hairpinq, pid);
1128 struct rte_port *port;
1129 struct rte_mempool *mbp;
1130 unsigned int nb_mbuf_per_pool;
1132 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1133 struct rte_gro_param gro_param;
1140 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1142 /* Configuration of logical cores. */
1143 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1144 sizeof(struct fwd_lcore *) * nb_lcores,
1145 RTE_CACHE_LINE_SIZE);
1146 if (fwd_lcores == NULL) {
1147 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1148 "failed\n", nb_lcores);
1150 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1151 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1152 sizeof(struct fwd_lcore),
1153 RTE_CACHE_LINE_SIZE);
1154 if (fwd_lcores[lc_id] == NULL) {
1155 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1158 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1161 RTE_ETH_FOREACH_DEV(pid) {
1163 /* Apply default TxRx configuration for all ports */
1164 port->dev_conf.txmode = tx_mode;
1165 port->dev_conf.rxmode = rx_mode;
1167 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1169 rte_exit(EXIT_FAILURE,
1170 "rte_eth_dev_info_get() failed\n");
1172 if (!(port->dev_info.tx_offload_capa &
1173 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1174 port->dev_conf.txmode.offloads &=
1175 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1177 if (port_numa[pid] != NUMA_NO_CONFIG)
1178 port_per_socket[port_numa[pid]]++;
1180 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1183 * if socket_id is invalid,
1184 * set to the first available socket.
1186 if (check_socket_id(socket_id) < 0)
1187 socket_id = socket_ids[0];
1188 port_per_socket[socket_id]++;
1192 /* Apply Rx offloads configuration */
1193 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1194 port->rx_conf[k].offloads =
1195 port->dev_conf.rxmode.offloads;
1196 /* Apply Tx offloads configuration */
1197 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1198 port->tx_conf[k].offloads =
1199 port->dev_conf.txmode.offloads;
1201 /* set flag to initialize port/queue */
1202 port->need_reconfig = 1;
1203 port->need_reconfig_queues = 1;
1204 port->tx_metadata = 0;
1206 /* Check the maximum number of segments per MTU.
1207 * Update the mbuf data size accordingly.
1209 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1210 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1211 data_size = rx_mode.max_rx_pkt_len /
1212 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1214 if ((data_size + RTE_PKTMBUF_HEADROOM) >
1216 mbuf_data_size = data_size +
1217 RTE_PKTMBUF_HEADROOM;
1224 TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
1228 * Create pools of mbuf.
1229 * If NUMA support is disabled, create a single pool of mbuf in
1230 * socket 0 memory by default.
1231 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1233 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
1234 * nb_txd can be configured at run time.
1236 if (param_total_num_mbufs)
1237 nb_mbuf_per_pool = param_total_num_mbufs;
1239 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1240 (nb_lcores * mb_mempool_cache) +
1241 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1242 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
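/*
 * Illustrative arithmetic (macro values assumed, e.g. 2048 RX and 2048 TX
 * descriptors, a 512-packet burst and a 250-mbuf cache on 4 lcores):
 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs per pool, then multiplied by
 * RTE_MAX_ETHPORTS to cover every possible port.
 */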
1248 for (i = 0; i < num_sockets; i++)
1249 mempools[i] = mbuf_pool_create(mbuf_data_size,
1253 if (socket_num == UMA_NO_CONFIG)
1254 mempools[0] = mbuf_pool_create(mbuf_data_size,
1255 nb_mbuf_per_pool, 0);
1257 mempools[socket_num] = mbuf_pool_create
1265 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1266 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1268 * Record which mbuf pool each logical core should use, if needed.
1270 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1271 mbp = mbuf_pool_find(
1272 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1275 mbp = mbuf_pool_find(0);
1276 fwd_lcores[lc_id]->mbp = mbp;
1277 /* initialize GSO context */
1278 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1279 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1280 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1281 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1283 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1286 /* Configuration of packet forwarding streams. */
1287 if (init_fwd_streams() < 0)
1288 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1292 /* create a gro context for each lcore */
1293 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1294 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1295 gro_param.max_item_per_flow = MAX_PKT_BURST;
1296 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1297 gro_param.socket_id = rte_lcore_to_socket_id(
1298 fwd_lcores_cpuids[lc_id]);
1299 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1300 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1301 rte_exit(EXIT_FAILURE,
1302 "rte_gro_ctx_create() failed\n");
1306 #if defined RTE_LIBRTE_PMD_SOFTNIC
1307 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1308 RTE_ETH_FOREACH_DEV(pid) {
1310 const char *driver = port->dev_info.driver_name;
1312 if (strcmp(driver, "net_softnic") == 0)
1313 port->softport.fwd_lcore_arg = fwd_lcores;
1322 reconfig(portid_t new_port_id, unsigned socket_id)
1324 struct rte_port *port;
1327 /* Reconfiguration of Ethernet ports. */
1328 port = &ports[new_port_id];
1330 ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
1334 /* set flag to initialize port/queue */
1335 port->need_reconfig = 1;
1336 port->need_reconfig_queues = 1;
1337 port->socket_id = socket_id;
1344 init_fwd_streams(void)
1347 struct rte_port *port;
1348 streamid_t sm_id, nb_fwd_streams_new;
1351 /* Set the socket ID according to whether NUMA is supported */
1352 RTE_ETH_FOREACH_DEV(pid) {
1354 if (nb_rxq > port->dev_info.max_rx_queues) {
1355 printf("Fail: nb_rxq(%d) is greater than "
1356 "max_rx_queues(%d)\n", nb_rxq,
1357 port->dev_info.max_rx_queues);
1360 if (nb_txq > port->dev_info.max_tx_queues) {
1361 printf("Fail: nb_txq(%d) is greater than "
1362 "max_tx_queues(%d)\n", nb_txq,
1363 port->dev_info.max_tx_queues);
1367 if (port_numa[pid] != NUMA_NO_CONFIG)
1368 port->socket_id = port_numa[pid];
1370 port->socket_id = rte_eth_dev_socket_id(pid);
1373 * if socket_id is invalid,
1374 * set to the first available socket.
1376 if (check_socket_id(port->socket_id) < 0)
1377 port->socket_id = socket_ids[0];
1381 if (socket_num == UMA_NO_CONFIG)
1382 port->socket_id = 0;
1384 port->socket_id = socket_num;
1388 q = RTE_MAX(nb_rxq, nb_txq);
1390 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1393 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1394 if (nb_fwd_streams_new == nb_fwd_streams)
1397 if (fwd_streams != NULL) {
1398 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1399 if (fwd_streams[sm_id] == NULL)
1401 rte_free(fwd_streams[sm_id]);
1402 fwd_streams[sm_id] = NULL;
1404 rte_free(fwd_streams);
1409 nb_fwd_streams = nb_fwd_streams_new;
1410 if (nb_fwd_streams) {
1411 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1412 sizeof(struct fwd_stream *) * nb_fwd_streams,
1413 RTE_CACHE_LINE_SIZE);
1414 if (fwd_streams == NULL)
1415 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1416 " (struct fwd_stream *)) failed\n",
1419 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1420 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1421 " struct fwd_stream", sizeof(struct fwd_stream),
1422 RTE_CACHE_LINE_SIZE);
1423 if (fwd_streams[sm_id] == NULL)
1424 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1425 "(struct fwd_stream) failed\n");
1432 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1434 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1436 unsigned int total_burst;
1437 unsigned int nb_burst;
1438 unsigned int burst_stats[3];
1439 uint16_t pktnb_stats[3];
1441 int burst_percent[3];
1444 * First compute the total number of packet bursts and the
1445 * two highest numbers of bursts of the same number of packets.
1448 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1449 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1450 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1451 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1454 total_burst += nb_burst;
1455 if (nb_burst > burst_stats[0]) {
1456 burst_stats[1] = burst_stats[0];
1457 pktnb_stats[1] = pktnb_stats[0];
1458 burst_stats[0] = nb_burst;
1459 pktnb_stats[0] = nb_pkt;
1460 } else if (nb_burst > burst_stats[1]) {
1461 burst_stats[1] = nb_burst;
1462 pktnb_stats[1] = nb_pkt;
1465 if (total_burst == 0)
1467 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1468 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1469 burst_percent[0], (int) pktnb_stats[0]);
1470 if (burst_stats[0] == total_burst) {
1474 if (burst_stats[0] + burst_stats[1] == total_burst) {
1475 printf(" + %d%% of %d pkts]\n",
1476 100 - burst_percent[0], pktnb_stats[1]);
1479 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1480 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1481 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1482 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1485 printf(" + %d%% of %d pkts + %d%% of others]\n",
1486 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
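/*
 * Illustrative example: a spread of 900 bursts of 32 packets, 80 bursts
 * of 16 packets and 20 other bursts (1000 bursts total) prints:
 *   RX-bursts : 1000 [90% of 32 pkts + 8% of 16 pkts + 2% of others]
 */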
1488 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1491 fwd_stream_stats_display(streamid_t stream_id)
1493 struct fwd_stream *fs;
1494 static const char *fwd_top_stats_border = "-------";
1496 fs = fwd_streams[stream_id];
1497 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1498 (fs->fwd_dropped == 0))
1500 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1501 "TX Port=%2d/Queue=%2d %s\n",
1502 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1503 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1504 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1505 " TX-dropped: %-14"PRIu64,
1506 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1508 /* if checksum mode */
1509 if (cur_fwd_eng == &csum_fwd_engine) {
1510 printf(" RX- bad IP checksum: %-14"PRIu64
1511 " Rx- bad L4 checksum: %-14"PRIu64
1512 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1513 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1514 fs->rx_bad_outer_l4_csum);
1519 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1520 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1521 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1526 fwd_stats_display(void)
1528 static const char *fwd_stats_border = "----------------------";
1529 static const char *acc_stats_border = "+++++++++++++++";
1531 struct fwd_stream *rx_stream;
1532 struct fwd_stream *tx_stream;
1533 uint64_t tx_dropped;
1534 uint64_t rx_bad_ip_csum;
1535 uint64_t rx_bad_l4_csum;
1536 uint64_t rx_bad_outer_l4_csum;
1537 } ports_stats[RTE_MAX_ETHPORTS];
1538 uint64_t total_rx_dropped = 0;
1539 uint64_t total_tx_dropped = 0;
1540 uint64_t total_rx_nombuf = 0;
1541 struct rte_eth_stats stats;
1542 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1543 uint64_t fwd_cycles = 0;
1545 uint64_t total_recv = 0;
1546 uint64_t total_xmit = 0;
1547 struct rte_port *port;
1552 memset(ports_stats, 0, sizeof(ports_stats));
1554 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1555 struct fwd_stream *fs = fwd_streams[sm_id];
1557 if (cur_fwd_config.nb_fwd_streams >
1558 cur_fwd_config.nb_fwd_ports) {
1559 fwd_stream_stats_display(sm_id);
1561 ports_stats[fs->tx_port].tx_stream = fs;
1562 ports_stats[fs->rx_port].rx_stream = fs;
1565 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1567 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1568 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1569 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1570 fs->rx_bad_outer_l4_csum;
1572 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1573 fwd_cycles += fs->core_cycles;
1576 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1579 pt_id = fwd_ports_ids[i];
1580 port = &ports[pt_id];
1582 rte_eth_stats_get(pt_id, &stats);
1583 stats.ipackets -= port->stats.ipackets;
1584 stats.opackets -= port->stats.opackets;
1585 stats.ibytes -= port->stats.ibytes;
1586 stats.obytes -= port->stats.obytes;
1587 stats.imissed -= port->stats.imissed;
1588 stats.oerrors -= port->stats.oerrors;
1589 stats.rx_nombuf -= port->stats.rx_nombuf;
1591 total_recv += stats.ipackets;
1592 total_xmit += stats.opackets;
1593 total_rx_dropped += stats.imissed;
1594 total_tx_dropped += ports_stats[pt_id].tx_dropped;
1595 total_tx_dropped += stats.oerrors;
1596 total_rx_nombuf += stats.rx_nombuf;
1598 printf("\n %s Forward statistics for port %-2d %s\n",
1599 fwd_stats_border, pt_id, fwd_stats_border);
1601 if (!port->rx_queue_stats_mapping_enabled &&
1602 !port->tx_queue_stats_mapping_enabled) {
1603 printf(" RX-packets: %-14"PRIu64
1604 " RX-dropped: %-14"PRIu64
1605 "RX-total: %-"PRIu64"\n",
1606 stats.ipackets, stats.imissed,
1607 stats.ipackets + stats.imissed);
1609 if (cur_fwd_eng == &csum_fwd_engine)
1610 printf(" Bad-ipcsum: %-14"PRIu64
1611 " Bad-l4csum: %-14"PRIu64
1612 "Bad-outer-l4csum: %-14"PRIu64"\n",
1613 ports_stats[pt_id].rx_bad_ip_csum,
1614 ports_stats[pt_id].rx_bad_l4_csum,
1615 ports_stats[pt_id].rx_bad_outer_l4_csum);
1616 if (stats.ierrors + stats.rx_nombuf > 0) {
1617 printf(" RX-error: %-"PRIu64"\n",
1619 printf(" RX-nombufs: %-14"PRIu64"\n",
1623 printf(" TX-packets: %-14"PRIu64
1624 " TX-dropped: %-14"PRIu64
1625 "TX-total: %-"PRIu64"\n",
1626 stats.opackets, ports_stats[pt_id].tx_dropped,
1627 stats.opackets + ports_stats[pt_id].tx_dropped);
1629 printf(" RX-packets: %14"PRIu64
1630 " RX-dropped:%14"PRIu64
1631 " RX-total:%14"PRIu64"\n",
1632 stats.ipackets, stats.imissed,
1633 stats.ipackets + stats.imissed);
1635 if (cur_fwd_eng == &csum_fwd_engine)
1636 printf(" Bad-ipcsum:%14"PRIu64
1637 " Bad-l4csum:%14"PRIu64
1638 " Bad-outer-l4csum: %-14"PRIu64"\n",
1639 ports_stats[pt_id].rx_bad_ip_csum,
1640 ports_stats[pt_id].rx_bad_l4_csum,
1641 ports_stats[pt_id].rx_bad_outer_l4_csum);
1642 if ((stats.ierrors + stats.rx_nombuf) > 0) {
1643 printf(" RX-error:%"PRIu64"\n", stats.ierrors);
1644 printf(" RX-nombufs: %14"PRIu64"\n",
1648 printf(" TX-packets: %14"PRIu64
1649 " TX-dropped:%14"PRIu64
1650 " TX-total:%14"PRIu64"\n",
1651 stats.opackets, ports_stats[pt_id].tx_dropped,
1652 stats.opackets + ports_stats[pt_id].tx_dropped);
1655 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1656 if (ports_stats[pt_id].rx_stream)
1657 pkt_burst_stats_display("RX",
1658 &ports_stats[pt_id].rx_stream->rx_burst_stats);
1659 if (ports_stats[pt_id].tx_stream)
1660 pkt_burst_stats_display("TX",
1661 &ports_stats[pt_id].tx_stream->tx_burst_stats);
1664 if (port->rx_queue_stats_mapping_enabled) {
1666 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1667 printf(" Stats reg %2d RX-packets:%14"PRIu64
1668 " RX-errors:%14"PRIu64
1669 " RX-bytes:%14"PRIu64"\n",
1670 j, stats.q_ipackets[j],
1671 stats.q_errors[j], stats.q_ibytes[j]);
1675 if (port->tx_queue_stats_mapping_enabled) {
1676 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1677 printf(" Stats reg %2d TX-packets:%14"PRIu64
1680 j, stats.q_opackets[j],
1685 printf(" %s--------------------------------%s\n",
1686 fwd_stats_border, fwd_stats_border);
1689 printf("\n %s Accumulated forward statistics for all ports"
1691 acc_stats_border, acc_stats_border);
1692 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1694 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1696 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1697 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1698 if (total_rx_nombuf > 0)
1699 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1700 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1702 acc_stats_border, acc_stats_border);
1703 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1705 printf("\n CPU cycles/packet=%u (total cycles="
1706 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1707 (unsigned int)(fwd_cycles / total_recv),
1708 fwd_cycles, total_recv);
1713 fwd_stats_reset(void)
1719 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1720 pt_id = fwd_ports_ids[i];
1721 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
1723 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1724 struct fwd_stream *fs = fwd_streams[sm_id];
1728 fs->fwd_dropped = 0;
1729 fs->rx_bad_ip_csum = 0;
1730 fs->rx_bad_l4_csum = 0;
1731 fs->rx_bad_outer_l4_csum = 0;
1733 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1734 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
1735 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
1737 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1738 fs->core_cycles = 0;
1744 flush_fwd_rx_queues(void)
1746 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1753 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1754 uint64_t timer_period;
1756 /* convert to number of cycles */
1757 timer_period = rte_get_timer_hz(); /* 1 second timeout */
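/*
 * The queues are presumably flushed twice, with a short delay in between,
 * so that packets still in flight after the first pass are also drained.
 */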
1759 for (j = 0; j < 2; j++) {
1760 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1761 for (rxq = 0; rxq < nb_rxq; rxq++) {
1762 port_id = fwd_ports_ids[rxp];
1764 * testpmd can get stuck in the do-while loop below
1765 * if rte_eth_rx_burst() always returns a nonzero
1766 * number of packets. A timer is added to exit this
1767 * loop after the 1-second timer expires.
1769 prev_tsc = rte_rdtsc();
1771 nb_rx = rte_eth_rx_burst(port_id, rxq,
1772 pkts_burst, MAX_PKT_BURST);
1773 for (i = 0; i < nb_rx; i++)
1774 rte_pktmbuf_free(pkts_burst[i]);
1776 cur_tsc = rte_rdtsc();
1777 diff_tsc = cur_tsc - prev_tsc;
1778 timer_tsc += diff_tsc;
1779 } while ((nb_rx > 0) &&
1780 (timer_tsc < timer_period));
1784 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1789 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1791 struct fwd_stream **fsm;
1794 #ifdef RTE_LIBRTE_BITRATE
1795 uint64_t tics_per_1sec;
1796 uint64_t tics_datum;
1797 uint64_t tics_current;
1798 uint16_t i, cnt_ports;
1800 cnt_ports = nb_ports;
1801 tics_datum = rte_rdtsc();
1802 tics_per_1sec = rte_get_timer_hz();
1804 fsm = &fwd_streams[fc->stream_idx];
1805 nb_fs = fc->stream_nb;
1807 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1808 (*pkt_fwd)(fsm[sm_id]);
1809 #ifdef RTE_LIBRTE_BITRATE
1810 if (bitrate_enabled != 0 &&
1811 bitrate_lcore_id == rte_lcore_id()) {
1812 tics_current = rte_rdtsc();
1813 if (tics_current - tics_datum >= tics_per_1sec) {
1814 /* Periodic bitrate calculation */
1815 for (i = 0; i < cnt_ports; i++)
1816 rte_stats_bitrate_calc(bitrate_data,
1818 tics_datum = tics_current;
1822 #ifdef RTE_LIBRTE_LATENCY_STATS
1823 if (latencystats_enabled != 0 &&
1824 latencystats_lcore_id == rte_lcore_id())
1825 rte_latencystats_update();
1828 } while (! fc->stopped);
1832 start_pkt_forward_on_core(void *fwd_arg)
1834 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1835 cur_fwd_config.fwd_eng->packet_fwd);
1840 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1841 * Used to start communication flows in network loopback test configurations.
1844 run_one_txonly_burst_on_core(void *fwd_arg)
1846 struct fwd_lcore *fwd_lc;
1847 struct fwd_lcore tmp_lcore;
1849 fwd_lc = (struct fwd_lcore *) fwd_arg;
1850 tmp_lcore = *fwd_lc;
1851 tmp_lcore.stopped = 1;
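/*
 * With stopped pre-set to 1, the do { } while (!fc->stopped) loop in
 * run_pkt_fwd_on_lcore() executes exactly once, sending a single burst.
 */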
1852 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1857 * Launch packet forwarding:
1858 * - Set up the per-port forwarding context.
1859 * - Launch logical cores with their forwarding configuration.
1862 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1864 port_fwd_begin_t port_fwd_begin;
1869 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1870 if (port_fwd_begin != NULL) {
1871 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1872 (*port_fwd_begin)(fwd_ports_ids[i]);
1874 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1875 lc_id = fwd_lcores_cpuids[i];
1876 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1877 fwd_lcores[i]->stopped = 0;
1878 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1879 fwd_lcores[i], lc_id);
1881 printf("launch lcore %u failed - diag=%d\n",
1888 * Launch packet forwarding configuration.
1891 start_packet_forwarding(int with_tx_first)
1893 port_fwd_begin_t port_fwd_begin;
1894 port_fwd_end_t port_fwd_end;
1895 struct rte_port *port;
1899 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1900 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1902 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1903 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1905 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1906 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1907 (!nb_rxq || !nb_txq))
1908 rte_exit(EXIT_FAILURE,
1909 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1910 cur_fwd_eng->fwd_mode_name);
1912 if (all_ports_started() == 0) {
1913 printf("Not all ports were started\n");
1916 if (test_done == 0) {
1917 printf("Packet forwarding already started\n");
1923 for (i = 0; i < nb_fwd_ports; i++) {
1924 pt_id = fwd_ports_ids[i];
1925 port = &ports[pt_id];
1926 if (!port->dcb_flag) {
1927 printf("In DCB mode, all forwarding ports must "
1928 "be configured in this mode.\n");
1932 if (nb_fwd_lcores == 1) {
1933 printf("In DCB mode,the nb forwarding cores "
1934 "should be larger than 1.\n");
1943 flush_fwd_rx_queues();
1945 pkt_fwd_config_display(&cur_fwd_config);
1946 rxtx_config_display();
1949 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1950 pt_id = fwd_ports_ids[i];
1951 port = &ports[pt_id];
1952 map_port_queue_stats_mapping_registers(pt_id, port);
1954 if (with_tx_first) {
1955 port_fwd_begin = tx_only_engine.port_fwd_begin;
1956 if (port_fwd_begin != NULL) {
1957 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1958 (*port_fwd_begin)(fwd_ports_ids[i]);
1960 while (with_tx_first--) {
1961 launch_packet_forwarding(
1962 run_one_txonly_burst_on_core);
1963 rte_eal_mp_wait_lcore();
1965 port_fwd_end = tx_only_engine.port_fwd_end;
1966 if (port_fwd_end != NULL) {
1967 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1968 (*port_fwd_end)(fwd_ports_ids[i]);
1971 launch_packet_forwarding(start_pkt_forward_on_core);
1975 stop_packet_forwarding(void)
1977 port_fwd_end_t port_fwd_end;
1983 printf("Packet forwarding not started\n");
1986 printf("Telling cores to stop...");
1987 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1988 fwd_lcores[lc_id]->stopped = 1;
1989 printf("\nWaiting for lcores to finish...\n");
1990 rte_eal_mp_wait_lcore();
1991 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1992 if (port_fwd_end != NULL) {
1993 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1994 pt_id = fwd_ports_ids[i];
1995 (*port_fwd_end)(pt_id);
1999 fwd_stats_display();
2001 printf("\nDone.\n");
2006 dev_set_link_up(portid_t pid)
2008 if (rte_eth_dev_set_link_up(pid) < 0)
2009 printf("\nSet link up fail.\n");
2013 dev_set_link_down(portid_t pid)
2015 if (rte_eth_dev_set_link_down(pid) < 0)
2016 printf("\nSet link down fail.\n");
2020 all_ports_started(void)
2023 struct rte_port *port;
2025 RTE_ETH_FOREACH_DEV(pi) {
2027 /* Check if there is a port that is not started */
2028 if ((port->port_status != RTE_PORT_STARTED) &&
2029 (port->slave_flag == 0))
2033 /* No unstarted port was found; all ports are started */
2038 port_is_stopped(portid_t port_id)
2040 struct rte_port *port = &ports[port_id];
2042 if ((port->port_status != RTE_PORT_STOPPED) &&
2043 (port->slave_flag == 0))
2049 all_ports_stopped(void)
2053 RTE_ETH_FOREACH_DEV(pi) {
2054 if (!port_is_stopped(pi))
2062 port_is_started(portid_t port_id)
2064 if (port_id_is_invalid(port_id, ENABLED_WARN))
2067 if (ports[port_id].port_status != RTE_PORT_STARTED)
2073 /* Configure the Rx and Tx hairpin queues for the selected port. */
2075 setup_hairpin_queues(portid_t pi)
2078 struct rte_eth_hairpin_conf hairpin_conf = {
2083 struct rte_port *port = &ports[pi];
2085 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2086 hairpin_conf.peers[0].port = pi;
2087 hairpin_conf.peers[0].queue = i + nb_rxq;
2088 diag = rte_eth_tx_hairpin_queue_setup
2089 (pi, qi, nb_txd, &hairpin_conf);
2094 /* Failed to set up the TX hairpin queue; return */
2095 if (rte_atomic16_cmpset(&(port->port_status),
2097 RTE_PORT_STOPPED) == 0)
2098 printf("Port %d can not be set back "
2099 "to stopped\n", pi);
2100 printf("Fail to configure port %d hairpin "
2102 /* try to reconfigure queues next time */
2103 port->need_reconfig_queues = 1;
2106 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2107 hairpin_conf.peers[0].port = pi;
2108 hairpin_conf.peers[0].queue = i + nb_txq;
2109 diag = rte_eth_rx_hairpin_queue_setup
2110 (pi, qi, nb_rxd, &hairpin_conf);
2115 /* Failed to set up the RX hairpin queue; return */
2116 if (rte_atomic16_cmpset(&(port->port_status),
2118 RTE_PORT_STOPPED) == 0)
2119 printf("Port %d can not be set back "
2120 "to stopped\n", pi);
2121 printf("Fail to configure port %d hairpin "
2123 /* try to reconfigure queues next time */
2124 port->need_reconfig_queues = 1;
2131 start_port(portid_t pid)
2133 int diag, need_check_link_status = -1;
2136 struct rte_port *port;
2137 struct rte_ether_addr mac_addr;
2138 struct rte_eth_hairpin_cap cap;
2140 if (port_id_is_invalid(pid, ENABLED_WARN))
2145 RTE_ETH_FOREACH_DEV(pi) {
2146 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2149 need_check_link_status = 0;
2151 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2152 RTE_PORT_HANDLING) == 0) {
2153 printf("Port %d is now not stopped\n", pi);
2157 if (port->need_reconfig > 0) {
2158 port->need_reconfig = 0;
2160 if (flow_isolate_all) {
2161 int ret = port_flow_isolate(pi, 1);
2163 printf("Failed to apply isolated"
2164 " mode on port %d\n", pi);
2168 configure_rxtx_dump_callbacks(0);
2169 printf("Configuring Port %d (socket %u)\n", pi,
2171 if (nb_hairpinq > 0 &&
2172 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2173 printf("Port %d doesn't support hairpin "
2177 /* configure port */
2178 diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
2179 nb_txq + nb_hairpinq,
2182 if (rte_atomic16_cmpset(&(port->port_status),
2183 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2184 printf("Port %d can not be set back "
2185 "to stopped\n", pi);
2186 printf("Fail to configure port %d\n", pi);
2187 /* try to reconfigure port next time */
2188 port->need_reconfig = 1;
2192 if (port->need_reconfig_queues > 0) {
2193 port->need_reconfig_queues = 0;
2194 /* setup tx queues */
2195 for (qi = 0; qi < nb_txq; qi++) {
2196 if ((numa_support) &&
2197 (txring_numa[pi] != NUMA_NO_CONFIG))
2198 diag = rte_eth_tx_queue_setup(pi, qi,
2199 port->nb_tx_desc[qi],
2201 &(port->tx_conf[qi]));
2203 diag = rte_eth_tx_queue_setup(pi, qi,
2204 port->nb_tx_desc[qi],
2206 &(port->tx_conf[qi]));
2211 /* Failed to set up the TX queue; return */
2212 if (rte_atomic16_cmpset(&(port->port_status),
2214 RTE_PORT_STOPPED) == 0)
2215 printf("Port %d can not be set back "
2216 "to stopped\n", pi);
2217 printf("Fail to configure port %d tx queues\n",
2219 /* try to reconfigure queues next time */
2220 port->need_reconfig_queues = 1;
2223 for (qi = 0; qi < nb_rxq; qi++) {
2224 /* setup rx queues */
2225 if ((numa_support) &&
2226 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2227 struct rte_mempool * mp =
2228 mbuf_pool_find(rxring_numa[pi]);
2230 printf("Failed to setup RX queue:"
2231 "No mempool allocation"
2232 " on the socket %d\n",
2237 diag = rte_eth_rx_queue_setup(pi, qi,
2238 port->nb_rx_desc[qi],
2240 &(port->rx_conf[qi]),
2243 struct rte_mempool *mp =
2244 mbuf_pool_find(port->socket_id);
2246 printf("Failed to setup RX queue:"
2247 "No mempool allocation"
2248 " on the socket %d\n",
2252 diag = rte_eth_rx_queue_setup(pi, qi,
2253 port->nb_rx_desc[qi],
2255 &(port->rx_conf[qi]),
2261 /* Failed to set up the RX queue; return */
2262 if (rte_atomic16_cmpset(&(port->port_status),
2264 RTE_PORT_STOPPED) == 0)
2265 printf("Port %d can not be set back "
2266 "to stopped\n", pi);
2267 printf("Fail to configure port %d rx queues\n",
2269 /* try to reconfigure queues next time */
2270 port->need_reconfig_queues = 1;
2273 /* setup hairpin queues */
2274 if (setup_hairpin_queues(pi) != 0)
2277 configure_rxtx_dump_callbacks(verbose_level);
2279 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2283 "Port %d: Failed to disable Ptype parsing\n",
2288 if (rte_eth_dev_start(pi) < 0) {
2289 printf("Fail to start port %d\n", pi);
2291 /* Failed to start the port; roll back to stopped */
2292 if (rte_atomic16_cmpset(&(port->port_status),
2293 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2294 printf("Port %d can not be set back to "
2299 if (rte_atomic16_cmpset(&(port->port_status),
2300 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2301 printf("Port %d can not be set into started\n", pi);
2303 if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2304 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2305 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2306 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2307 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2309 /* at least one port was started; the link status needs to be checked */
2310 need_check_link_status = 1;
2313 if (need_check_link_status == 1 && !no_link_check)
2314 check_all_ports_link_status(RTE_PORT_ALL);
2315 else if (need_check_link_status == 0)
2316 printf("Please stop the ports first\n");
2323 stop_port(portid_t pid)
2326 struct rte_port *port;
2327 int need_check_link_status = 0;
2334 if (port_id_is_invalid(pid, ENABLED_WARN))
2337 printf("Stopping ports...\n");
2339 RTE_ETH_FOREACH_DEV(pi) {
2340 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2343 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2344 printf("Please remove port %d from forwarding configuration.\n", pi);
2348 if (port_is_bonding_slave(pi)) {
2349 printf("Please remove port %d from bonded device.\n", pi);
2354 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2355 RTE_PORT_HANDLING) == 0)
2358 rte_eth_dev_stop(pi);
2360 if (rte_atomic16_cmpset(&(port->port_status),
2361 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2362 printf("Port %d can not be set into stopped\n", pi);
2363 need_check_link_status = 1;
2365 if (need_check_link_status && !no_link_check)
2366 check_all_ports_link_status(RTE_PORT_ALL);
2372 remove_invalid_ports_in(portid_t *array, portid_t *total)
2375 portid_t new_total = 0;
2377 for (i = 0; i < *total; i++)
2378 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2379 array[new_total] = array[i];
2386 remove_invalid_ports(void)
2388 remove_invalid_ports_in(ports_ids, &nb_ports);
2389 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2390 nb_cfg_ports = nb_fwd_ports;
2394 close_port(portid_t pid)
2397 struct rte_port *port;
2399 if (port_id_is_invalid(pid, ENABLED_WARN))
2402 printf("Closing ports...\n");
2404 RTE_ETH_FOREACH_DEV(pi) {
2405 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2408 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2409 printf("Please remove port %d from forwarding configuration.\n", pi);
2413 if (port_is_bonding_slave(pi)) {
2414 printf("Please remove port %d from bonded device.\n", pi);
2419 if (rte_atomic16_cmpset(&(port->port_status),
2420 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2421 printf("Port %d is already closed\n", pi);
2425 if (rte_atomic16_cmpset(&(port->port_status),
2426 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2427 printf("Port %d is now not stopped\n", pi);
2431 if (port->flow_list)
2432 port_flow_flush(pi);
2433 rte_eth_dev_close(pi);
2435 remove_invalid_ports();
2437 if (rte_atomic16_cmpset(&(port->port_status),
2438 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2439 printf("Port %d cannot be set to closed\n", pi);
2446 reset_port(portid_t pid)
2450 struct rte_port *port;
2452 if (port_id_is_invalid(pid, ENABLED_WARN))
2455 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2456 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2457 printf("Can not reset port(s), please stop port(s) first.\n");
2461 printf("Resetting ports...\n");
2463 RTE_ETH_FOREACH_DEV(pi) {
2464 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2467 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2468 printf("Please remove port %d from forwarding "
2469 "configuration.\n", pi);
2473 if (port_is_bonding_slave(pi)) {
2474 printf("Please remove port %d from bonded device.\n",
2479 diag = rte_eth_dev_reset(pi);
2482 port->need_reconfig = 1;
2483 port->need_reconfig_queues = 1;
2485 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2493 attach_port(char *identifier)
2496 struct rte_dev_iterator iterator;
2498 printf("Attaching a new port...\n");
2500 if (identifier == NULL) {
2501 printf("Invalid parameters are specified\n");
2505 if (rte_dev_probe(identifier) < 0) {
2506 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2510 /* first attach mode: event */
2511 if (setup_on_probe_event) {
2512 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2513 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2514 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2515 ports[pi].need_setup != 0)
2516 setup_attached_port(pi);
2520 /* second attach mode: iterator */
2521 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2522 /* setup ports matching the devargs used for probing */
2523 if (port_is_forwarding(pi))
2524 continue; /* port was already attached before */
2525 setup_attached_port(pi);
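/*
 * Illustrative sketch (hypothetical helper): the second attach mode walks
 * every port whose devargs match the identifier. Breaking out of the loop
 * early requires rte_eth_iterator_cleanup(), as detach_device() below
 * demonstrates; on normal termination the macro cleans up after itself.
 */
static void
print_matching_ports(const char *identifier)
{
	struct rte_dev_iterator iterator;
	portid_t pi;

	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator)
		printf("devargs \"%s\" matches port %u\n", identifier, pi);
}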
2530 setup_attached_port(portid_t pi)
2532 unsigned int socket_id;
2535 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2536 /* if socket_id is invalid, set to the first available socket. */
2537 if (check_socket_id(socket_id) < 0)
2538 socket_id = socket_ids[0];
2539 reconfig(pi, socket_id);
2540 ret = rte_eth_promiscuous_enable(pi);
2542 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
2543 pi, rte_strerror(-ret));
2545 ports_ids[nb_ports++] = pi;
2546 fwd_ports_ids[nb_fwd_ports++] = pi;
2547 nb_cfg_ports = nb_fwd_ports;
2548 ports[pi].need_setup = 0;
2549 ports[pi].port_status = RTE_PORT_STOPPED;
2551 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2556 detach_port_device(portid_t port_id)
2558 struct rte_device *dev;
2561 printf("Removing a device...\n");
2563 if (port_id_is_invalid(port_id, ENABLED_WARN))
2566 dev = rte_eth_devices[port_id].device;
2568 printf("Device already removed\n");
2572 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2573 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2574 printf("Port not stopped\n");
2577 printf("Port was not closed\n");
2578 if (ports[port_id].flow_list)
2579 port_flow_flush(port_id);
2582 if (rte_dev_remove(dev) < 0) {
2583 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2586 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2587 /* reset mapping between old ports and removed device */
2588 rte_eth_devices[sibling].device = NULL;
2589 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2590 /* sibling ports are forced to be closed */
2591 ports[sibling].port_status = RTE_PORT_CLOSED;
2592 printf("Port %u is closed\n", sibling);
2596 remove_invalid_ports();
2598 printf("Device of port %u is detached\n", port_id);
2599 printf("Now total ports is %d\n", nb_ports);
2605 detach_device(char *identifier)
2607 struct rte_dev_iterator iterator;
2608 struct rte_devargs da;
2611 printf("Removing a device...\n");
2613 memset(&da, 0, sizeof(da));
2614 if (rte_devargs_parsef(&da, "%s", identifier)) {
2615 printf("cannot parse identifier\n");
2621 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
2622 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2623 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2624 printf("Port %u not stopped\n", port_id);
2625 rte_eth_iterator_cleanup(&iterator);
2629 /* the matched port is forced to be closed */
2630 if (ports[port_id].flow_list)
2631 port_flow_flush(port_id);
2632 ports[port_id].port_status = RTE_PORT_CLOSED;
2633 printf("Port %u is now closed\n", port_id);
2637 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
2638 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
2639 da.name, da.bus->name);
2643 remove_invalid_ports();
2645 printf("Device %s is detached\n", identifier);
2646 printf("Now total ports is %d\n", nb_ports);
2658 stop_packet_forwarding();
2660 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2662 if (mp_alloc_type == MP_ALLOC_ANON)
2663 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
2667 if (ports != NULL) {
2669 RTE_ETH_FOREACH_DEV(pt_id) {
2670 printf("\nStopping port %d...\n", pt_id);
2674 RTE_ETH_FOREACH_DEV(pt_id) {
2675 printf("\nShutting down port %d...\n", pt_id);
2682 ret = rte_dev_event_monitor_stop();
2685 "fail to stop device event monitor.");
2689 ret = rte_dev_event_callback_unregister(NULL,
2690 dev_event_callback, NULL);
2693 "fail to unregister device event callback.\n");
2697 ret = rte_dev_hotplug_handle_disable();
2700 "fail to disable hotplug handling.\n");
2704 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2706 rte_mempool_free(mempools[i]);
2709 printf("\nBye...\n");
2712 typedef void (*cmd_func_t)(void);
2713 struct pmd_test_command {
2714 const char *cmd_name;
2715 cmd_func_t cmd_func;
2718 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
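/* Note: this is the classic element-count idiom; RTE_DIM() from
 * rte_common.h expands to the same sizeof ratio and is used below in
 * init_port_dcb_config(). */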
2720 /* Check the link status of all ports for up to 9 s, then print the final result */
2722 check_all_ports_link_status(uint32_t port_mask)
2724 #define CHECK_INTERVAL 100 /* 100ms */
2725 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2727 uint8_t count, all_ports_up, print_flag = 0;
2728 struct rte_eth_link link;
2731 printf("Checking link statuses...\n");
2733 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2735 RTE_ETH_FOREACH_DEV(portid) {
2736 if ((port_mask & (1 << portid)) == 0)
2738 memset(&link, 0, sizeof(link));
2739 ret = rte_eth_link_get_nowait(portid, &link);
2742 if (print_flag == 1)
2743 printf("Port %u link get failed: %s\n",
2744 portid, rte_strerror(-ret));
2747 /* print link status if flag set */
2748 if (print_flag == 1) {
2749 if (link.link_status)
2751 "Port%d Link Up. speed %u Mbps- %s\n",
2752 portid, link.link_speed,
2753 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2754 ("full-duplex") : ("half-duplex\n"));
2756 printf("Port %d Link Down\n", portid);
2759 /* clear all_ports_up flag if any link down */
2760 if (link.link_status == ETH_LINK_DOWN) {
2765 /* after finally printing all link status, get out */
2766 if (print_flag == 1)
2769 if (all_ports_up == 0) {
2771 rte_delay_ms(CHECK_INTERVAL);
2774 /* set the print_flag if all ports up or timeout */
2775 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
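/*
 * Illustrative sketch (hypothetical helper): the same poll-with-timeout
 * pattern reduced to a single port, assuming a started port and this
 * release's rte_eth_link_get_nowait() returning a negative errno on failure.
 */
static int
wait_port_link_up(portid_t pid, unsigned int timeout_ms)
{
	struct rte_eth_link link;
	unsigned int waited;

	for (waited = 0; waited < timeout_ms; waited += CHECK_INTERVAL) {
		memset(&link, 0, sizeof(link));
		if (rte_eth_link_get_nowait(pid, &link) < 0)
			return -1;		/* query failed */
		if (link.link_status == ETH_LINK_UP)
			return 0;		/* link came up */
		rte_delay_ms(CHECK_INTERVAL);	/* 100 ms per try */
	}
	return -1;	/* timed out with the link still down */
}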
2785 * This callback removes one port of a device. It is limited: it does
2786 * not handle removing multiple ports of the same device.
2787 * TODO: device detach is planned to move from the user side into the
2788 * EAL, with all PMDs converted to free port resources on ethdev close.
2791 rmv_port_callback(void *arg)
2793 int need_to_start = 0;
2794 int org_no_link_check = no_link_check;
2795 portid_t port_id = (intptr_t)arg;
2797 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2799 if (!test_done && port_is_forwarding(port_id)) {
2801 stop_packet_forwarding();
2805 no_link_check = org_no_link_check;
2806 close_port(port_id);
2807 detach_port_device(port_id);
2809 start_packet_forwarding(0);
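/*
 * Illustrative sketch: rmv_port_callback() is always scheduled through
 * rte_eal_alarm_set() rather than called directly, keeping the heavy detach
 * work out of the interrupt context. The scheduling idiom, with a
 * hypothetical callback:
 */
static void
deferred_work(void *arg)
{
	portid_t pid = (intptr_t)arg;	/* unpack the port id */

	printf("deferred work for port %u\n", pid);
}

static int
schedule_deferred_work(portid_t pid)
{
	/* fire once, 100000 us (100 ms) from now, in the alarm thread */
	return rte_eal_alarm_set(100000, deferred_work,
			(void *)(intptr_t)pid);
}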
2812 /* This function is used by the interrupt thread */
2814 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2817 RTE_SET_USED(param);
2818 RTE_SET_USED(ret_param);
2820 if (type >= RTE_ETH_EVENT_MAX) {
2821 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2822 port_id, __func__, type);
2824 } else if (event_print_mask & (UINT32_C(1) << type)) {
2825 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2826 eth_event_desc[type]);
2831 case RTE_ETH_EVENT_NEW:
2832 ports[port_id].need_setup = 1;
2833 ports[port_id].port_status = RTE_PORT_HANDLING;
2835 case RTE_ETH_EVENT_INTR_RMV:
2836 if (port_id_is_invalid(port_id, DISABLED_WARN))
2838 if (rte_eal_alarm_set(100000,
2839 rmv_port_callback, (void *)(intptr_t)port_id))
2840 fprintf(stderr, "Could not set up deferred device removal\n");
2849 register_eth_event_callback(void)
2852 enum rte_eth_event_type event;
2854 for (event = RTE_ETH_EVENT_UNKNOWN;
2855 event < RTE_ETH_EVENT_MAX; event++) {
2856 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
2861 TESTPMD_LOG(ERR, "Failed to register callback for "
2862 "%s event\n", eth_event_desc[event]);
2870 /* This function is used by the interrupt thread */
2872 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2873 __rte_unused void *arg)
2878 if (type >= RTE_DEV_EVENT_MAX) {
2879 fprintf(stderr, "%s called upon invalid event %d\n",
2885 case RTE_DEV_EVENT_REMOVE:
2886 RTE_LOG(DEBUG, EAL, "Device %s has been removed!\n",
2888 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2890 RTE_LOG(ERR, EAL, "cannot get port for device %s!\n",
2895 * Because the user's callback is invoked from within the EAL
2896 * interrupt callback, the interrupt callback must finish before it
2897 * can be unregistered when detaching the device. So return from
2898 * this callback quickly and detach the device through a deferred
2899 * removal instead. This is a workaround; once device detaching is
2900 * moved into the EAL in the future, the deferred removal could
2903 if (rte_eal_alarm_set(100000,
2904 rmv_port_callback, (void *)(intptr_t)port_id))
2906 "Could not set up deferred device removal\n");
2908 case RTE_DEV_EVENT_ADD:
2909 RTE_LOG(ERR, EAL, "Device %s has been added!\n",
2911 /* TODO: after the kernel driver binding finishes,
2912 * attach the port.
2921 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2925 uint8_t mapping_found = 0;
2927 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2928 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2929 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2930 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2931 tx_queue_stats_mappings[i].queue_id,
2932 tx_queue_stats_mappings[i].stats_counter_id);
2939 port->tx_queue_stats_mapping_enabled = 1;
2944 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2948 uint8_t mapping_found = 0;
2950 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2951 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2952 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2953 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2954 rx_queue_stats_mappings[i].queue_id,
2955 rx_queue_stats_mappings[i].stats_counter_id);
2962 port->rx_queue_stats_mapping_enabled = 1;
2967 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2971 diag = set_tx_queue_stats_mapping_registers(pi, port);
2973 if (diag == -ENOTSUP) {
2974 port->tx_queue_stats_mapping_enabled = 0;
2975 printf("TX queue stats mapping not supported port id=%d\n", pi);
2978 rte_exit(EXIT_FAILURE,
2979 "set_tx_queue_stats_mapping_registers "
2980 "failed for port id=%d diag=%d\n",
2984 diag = set_rx_queue_stats_mapping_registers(pi, port);
2986 if (diag == -ENOTSUP) {
2987 port->rx_queue_stats_mapping_enabled = 0;
2988 printf("RX queue stats mapping not supported port id=%d\n", pi);
2991 rte_exit(EXIT_FAILURE,
2992 "set_rx_queue_stats_mapping_registers "
2993 "failed for port id=%d diag=%d\n",
2999 rxtx_port_config(struct rte_port *port)
3004 for (qid = 0; qid < nb_rxq; qid++) {
3005 offloads = port->rx_conf[qid].offloads;
3006 port->rx_conf[qid] = port->dev_info.default_rxconf;
3008 port->rx_conf[qid].offloads = offloads;
3010 /* Check if any Rx parameters have been passed */
3011 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3012 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3014 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3015 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3017 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3018 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3020 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3021 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3023 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3024 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3026 port->nb_rx_desc[qid] = nb_rxd;
3029 for (qid = 0; qid < nb_txq; qid++) {
3030 offloads = port->tx_conf[qid].offloads;
3031 port->tx_conf[qid] = port->dev_info.default_txconf;
3033 port->tx_conf[qid].offloads = offloads;
3035 /* Check if any Tx parameters have been passed */
3036 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3037 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3039 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3040 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3042 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3043 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3045 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3046 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3048 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3049 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3051 port->nb_tx_desc[qid] = nb_txd;
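/*
 * Note: each per-queue block above follows the same sequence: save the
 * offload mask, reset the queue conf to the PMD's default_{rx,tx}conf,
 * restore the mask, then apply only the command-line thresholds that were
 * explicitly set (RTE_PMD_PARAM_UNSET means "keep the PMD default").
 */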
3056 init_port_config(void)
3059 struct rte_port *port;
3062 RTE_ETH_FOREACH_DEV(pid) {
3064 port->dev_conf.fdir_conf = fdir_conf;
3066 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3071 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3072 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3073 rss_hf & port->dev_info.flow_type_rss_offloads;
3075 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3076 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3079 if (port->dcb_flag == 0) {
3080 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3081 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
3083 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3086 rxtx_port_config(port);
3088 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3092 map_port_queue_stats_mapping_registers(pid, port);
3093 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3094 rte_pmd_ixgbe_bypass_init(pid);
3097 if (lsc_interrupt &&
3098 (rte_eth_devices[pid].data->dev_flags &
3099 RTE_ETH_DEV_INTR_LSC))
3100 port->dev_conf.intr_conf.lsc = 1;
3101 if (rmv_interrupt &&
3102 (rte_eth_devices[pid].data->dev_flags &
3103 RTE_ETH_DEV_INTR_RMV))
3104 port->dev_conf.intr_conf.rmv = 1;
3108 void set_port_slave_flag(portid_t slave_pid)
3110 struct rte_port *port;
3112 port = &ports[slave_pid];
3113 port->slave_flag = 1;
3116 void clear_port_slave_flag(portid_t slave_pid)
3118 struct rte_port *port;
3120 port = &ports[slave_pid];
3121 port->slave_flag = 0;
3124 uint8_t port_is_bonding_slave(portid_t slave_pid)
3126 struct rte_port *port;
3128 port = &ports[slave_pid];
3129 if ((rte_eth_devices[slave_pid].data->dev_flags &
3130 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3135 const uint16_t vlan_tags[] = {
3136 0, 1, 2, 3, 4, 5, 6, 7,
3137 8, 9, 10, 11, 12, 13, 14, 15,
3138 16, 17, 18, 19, 20, 21, 22, 23,
3139 24, 25, 26, 27, 28, 29, 30, 31
3143 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3144 enum dcb_mode_enable dcb_mode,
3145 enum rte_eth_nb_tcs num_tcs,
3150 struct rte_eth_rss_conf rss_conf;
3153 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3154 * given above, and the number of traffic classes available for use.
3156 if (dcb_mode == DCB_VT_ENABLED) {
3157 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3158 ð_conf->rx_adv_conf.vmdq_dcb_conf;
3159 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3160 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3162 /* VMDQ+DCB RX and TX configurations */
3163 vmdq_rx_conf->enable_default_pool = 0;
3164 vmdq_rx_conf->default_pool = 0;
3165 vmdq_rx_conf->nb_queue_pools =
3166 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3167 vmdq_tx_conf->nb_queue_pools =
3168 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3170 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3171 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3172 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3173 vmdq_rx_conf->pool_map[i].pools =
3174 1 << (i % vmdq_rx_conf->nb_queue_pools);
3176 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3177 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3178 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3181 /* set DCB mode of RX and TX of multiple queues */
3182 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
3183 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3185 struct rte_eth_dcb_rx_conf *rx_conf =
3186 ð_conf->rx_adv_conf.dcb_rx_conf;
3187 struct rte_eth_dcb_tx_conf *tx_conf =
3188 ð_conf->tx_adv_conf.dcb_tx_conf;
3190 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3194 rx_conf->nb_tcs = num_tcs;
3195 tx_conf->nb_tcs = num_tcs;
3197 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3198 rx_conf->dcb_tc[i] = i % num_tcs;
3199 tx_conf->dcb_tc[i] = i % num_tcs;
3202 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
3203 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3204 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3208 eth_conf->dcb_capability_en =
3209 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3211 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
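/*
 * Worked example: with num_tcs == ETH_4_TCS the VMDq+DCB branch above
 * creates 32 pools, so pool_map[i].pools = 1 << (i % 32) gives each of the
 * 32 vlan_tags its own pool; with ETH_8_TCS there are 16 pools and only the
 * first 16 tags get mapped, one pool each. dcb_tc[i] = i % num_tcs spreads
 * the 8 user priorities round-robin, priority p landing in TC (p % num_tcs).
 */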
3217 init_port_dcb_config(portid_t pid,
3218 enum dcb_mode_enable dcb_mode,
3219 enum rte_eth_nb_tcs num_tcs,
3222 struct rte_eth_conf port_conf;
3223 struct rte_port *rte_port;
3227 rte_port = &ports[pid];
3229 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3230 /* Enter DCB configuration status */
3233 port_conf.rxmode = rte_port->dev_conf.rxmode;
3234 port_conf.txmode = rte_port->dev_conf.txmode;
3236 /* Set the configuration of DCB in VT mode and DCB in non-VT mode */
3237 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3240 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3242 /* Re-configure the device. */
3243 retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
3247 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3251 /* If dev_info.vmdq_pool_base is greater than 0,
3252 * the queue ids of the VMDq pools start after the PF queues.
3254 if (dcb_mode == DCB_VT_ENABLED &&
3255 rte_port->dev_info.vmdq_pool_base > 0) {
3256 printf("VMDQ_DCB multi-queue mode is nonsensical"
3257 " for port %d.", pid);
3261 /* Assume the ports in testpmd have the same DCB capability
3262 * and the same number of rxq and txq in DCB mode
3264 if (dcb_mode == DCB_VT_ENABLED) {
3265 if (rte_port->dev_info.max_vfs > 0) {
3266 nb_rxq = rte_port->dev_info.nb_rx_queues;
3267 nb_txq = rte_port->dev_info.nb_tx_queues;
3269 nb_rxq = rte_port->dev_info.max_rx_queues;
3270 nb_txq = rte_port->dev_info.max_tx_queues;
3273 /* If VT is disabled, use all PF queues */
3274 if (rte_port->dev_info.vmdq_pool_base == 0) {
3275 nb_rxq = rte_port->dev_info.max_rx_queues;
3276 nb_txq = rte_port->dev_info.max_tx_queues;
3278 nb_rxq = (queueid_t)num_tcs;
3279 nb_txq = (queueid_t)num_tcs;
3283 rx_free_thresh = 64;
3285 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3287 rxtx_port_config(rte_port);
3289 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3290 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3291 rx_vft_set(pid, vlan_tags[i], 1);
3293 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3297 map_port_queue_stats_mapping_registers(pid, rte_port);
3299 rte_port->dcb_flag = 1;
3307 /* Configuration of Ethernet ports. */
3308 ports = rte_zmalloc("testpmd: ports",
3309 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3310 RTE_CACHE_LINE_SIZE);
3311 if (ports == NULL) {
3312 rte_exit(EXIT_FAILURE,
3313 "rte_zmalloc(%d struct rte_port) failed\n",
3317 /* Initialize ports NUMA structures */
3318 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3319 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3320 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3334 const char clr[] = { 27, '[', '2', 'J', '\0' };
3335 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3337 /* Clear screen and move to top left */
3338 printf("%s%s", clr, top_left);
3340 printf("\nPort statistics ====================================");
3341 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3342 nic_stats_display(fwd_ports_ids[i]);
3348 signal_handler(int signum)
3350 if (signum == SIGINT || signum == SIGTERM) {
3351 printf("\nSignal %d received, preparing to exit...\n",
3353 #ifdef RTE_LIBRTE_PDUMP
3354 /* uninitialize packet capture framework */
3357 #ifdef RTE_LIBRTE_LATENCY_STATS
3358 if (latencystats_enabled != 0)
3359 rte_latencystats_uninit();
3362 /* Set flag to indicate forced termination. */
3364 /* exit with the expected status */
3365 signal(signum, SIG_DFL);
3366 kill(getpid(), signum);
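/*
 * Note: restoring SIG_DFL and re-raising the same signal makes the process
 * terminate with the conventional "killed by signal" status, so a parent
 * shell still observes SIGINT/SIGTERM rather than a plain exit code.
 */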
3371 main(int argc, char** argv)
3378 signal(SIGINT, signal_handler);
3379 signal(SIGTERM, signal_handler);
3381 testpmd_logtype = rte_log_register("testpmd");
3382 if (testpmd_logtype < 0)
3383 rte_exit(EXIT_FAILURE, "Cannot register log type");
3384 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3386 diag = rte_eal_init(argc, argv);
3388 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3389 rte_strerror(rte_errno));
3391 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3392 rte_exit(EXIT_FAILURE,
3393 "Secondary process type not supported.\n");
3395 ret = register_eth_event_callback();
3397 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3399 #ifdef RTE_LIBRTE_PDUMP
3400 /* initialize packet capture framework */
3405 RTE_ETH_FOREACH_DEV(port_id) {
3406 ports_ids[count] = port_id;
3409 nb_ports = (portid_t) count;
3411 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3413 /* allocate port structures, and init them */
3416 set_def_fwd_config();
3418 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3419 "Check the core mask argument\n");
3421 /* Bitrate/latency stats disabled by default */
3422 #ifdef RTE_LIBRTE_BITRATE
3423 bitrate_enabled = 0;
3425 #ifdef RTE_LIBRTE_LATENCY_STATS
3426 latencystats_enabled = 0;
3429 /* on FreeBSD, mlockall() is disabled by default */
3430 #ifdef RTE_EXEC_ENV_FREEBSD
3439 launch_args_parse(argc, argv);
3441 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3442 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3446 if (tx_first && interactive)
3447 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3448 "interactive mode.\n");
3450 if (tx_first && lsc_interrupt) {
3451 printf("Warning: lsc_interrupt needs to be off when "
3452 " using tx_first. Disabling.\n");
3456 if (!nb_rxq && !nb_txq)
3457 printf("Warning: Either rx or tx queues should be non-zero\n");
3459 if (nb_rxq > 1 && nb_rxq > nb_txq)
3460 printf("Warning: nb_rxq=%d enables RSS configuration, "
3461 "but nb_txq=%d will prevent to fully test it.\n",
3467 ret = rte_dev_hotplug_handle_enable();
3470 "fail to enable hotplug handling.");
3474 ret = rte_dev_event_monitor_start();
3477 "fail to start device event monitoring.");
3481 ret = rte_dev_event_callback_register(NULL,
3482 dev_event_callback, NULL);
3485 "fail to register device event callback\n");
3490 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3491 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3493 /* set all ports to promiscuous mode by default */
3494 RTE_ETH_FOREACH_DEV(port_id) {
3495 ret = rte_eth_promiscuous_enable(port_id);
3497 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3498 port_id, rte_strerror(-ret));
3501 /* Init metrics library */
3502 rte_metrics_init(rte_socket_id());
3504 #ifdef RTE_LIBRTE_LATENCY_STATS
3505 if (latencystats_enabled != 0) {
3506 int ret = rte_latencystats_init(1, NULL);
3508 printf("Warning: latencystats init()"
3509 " returned error %d\n", ret);
3510 printf("Latencystats running on lcore %d\n",
3511 latencystats_lcore_id);
3515 /* Setup bitrate stats */
3516 #ifdef RTE_LIBRTE_BITRATE
3517 if (bitrate_enabled != 0) {
3518 bitrate_data = rte_stats_bitrate_create();
3519 if (bitrate_data == NULL)
3520 rte_exit(EXIT_FAILURE,
3521 "Could not allocate bitrate data.\n");
3522 rte_stats_bitrate_reg(bitrate_data);
3526 #ifdef RTE_LIBRTE_CMDLINE
3527 if (strlen(cmdline_filename) != 0)
3528 cmdline_read_from_file(cmdline_filename);
3530 if (interactive == 1) {
3532 printf("Start automatic packet forwarding\n");
3533 start_packet_forwarding(0);
3545 printf("No commandline core given, start packet forwarding\n");
3546 start_packet_forwarding(tx_first);
3547 if (stats_period != 0) {
3548 uint64_t prev_time = 0, cur_time, diff_time = 0;
3549 uint64_t timer_period;
3551 /* Convert to number of cycles */
3552 timer_period = stats_period * rte_get_timer_hz();
3554 while (f_quit == 0) {
3555 cur_time = rte_get_timer_cycles();
3556 diff_time += cur_time - prev_time;
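/* Note: prev_time starts at 0, so the first pass over-counts and
 * triggers an immediate stats print; after that, the unsigned 64-bit
 * subtraction yields the elapsed cycles even across a timer wrap. */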
3558 if (diff_time >= timer_period) {
3560 /* Reset the timer */
3563 /* Sleep to avoid unnecessary checks */
3564 prev_time = cur_time;
3569 printf("Press enter to exit\n");
3570 rc = read(0, &c, 1);
3576 ret = rte_eal_cleanup();
3578 rte_exit(EXIT_FAILURE,
3579 "EAL cleanup failed: %s\n", strerror(-ret));
3581 return EXIT_SUCCESS;