/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation

#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATESTATS
#include <rte_bitrate.h>
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>

/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#define HUGE_FLAG MAP_HUGETLB

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#define HUGE_SHIFT MAP_HUGE_SHIFT

#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

char cmdline_filename[PATH_MAX] = {0};

 * NUMA support configuration.
 * When set, NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports, among CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
uint8_t numa_support = 1; /**< numa enabled by default */

 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
uint8_t socket_num = UMA_NO_CONFIG;

 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *   anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
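/*
 * Illustration (assumed usage, not taken from this file): the allocation
 * type is normally chosen on the testpmd command line, e.g. a hypothetical
 * invocation like
 *
 *     testpmd -l 0-1 -n 4 -- --mp-alloc=xmem
 *
 * would select MP_ALLOC_XMEM here instead of the MP_ALLOC_NATIVE default.
 */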
 * Store the specified sockets on which the memory pools used by the ports
 * are allocated.
uint8_t port_numa[RTE_MAX_ETHPORTS];

 * Store the specified sockets on which the RX rings used by the ports
 * are allocated.
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

 * Store the specified sockets on which the TX rings used by the ports
 * are allocated.
uint8_t txring_numa[RTE_MAX_ETHPORTS];

 * Record the Ethernet addresses of the peer target ports to which packets
 * are forwarded.
 * Must be instantiated with the Ethernet addresses of the peer traffic
 * generator ports.
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

 * Probed Target Environment.
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t nb_cfg_ports;   /**< Number of configured ports. */
portid_t nb_fwd_ports;   /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
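/*
 * Worked example (illustrative numbers only): with nb_ports = 2 probed
 * ports and nb_rxq = 4 RX queues per port, nb_fwd_streams = 2 * 4 = 8.
 * Per the inequalities above, the configured and forwarding counts can
 * only ever be subsets of the probed resources.
 */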
 * Forwarding engines.
struct fwd_engine *fwd_engines[] = {
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
				      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

 * In a container, the process running with the 'stats-period' option cannot
 * be terminated directly. Set a flag to exit the stats-period loop after
 * SIGINT/SIGTERM has been received.

 * Configuration of packet segments used by the "txonly" processing engine.
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */
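/*
 * Illustration (an assumption about the command line, not taken from this
 * file): the two timings above are usually set together, e.g. a
 * hypothetical "--txtimes=100000,1000" would request roughly 100000 ns
 * between bursts and 1000 ns between consecutive packets within a burst.
 */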
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status */
uint8_t dcb_test = 0;

 * Configurable number of RX/TX queues.
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1

 * Configurable values of RX and TX ring threshold registers.
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

 * Configurable value of RX free threshold.
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

 * Configurable value of RX drop enable.
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

 * Configurable value of TX free threshold.
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

 * Configurable value of TX RS bit threshold.
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

 * Configurable value of buffered packets before sending.
uint16_t noisy_tx_sw_bufsz;

 * Configurable value of packet buffer timeout.
uint16_t noisy_tx_sw_buf_flush_time;

 * Configurable value for the size of the VNF internal memory area
 * used for simulating noisy neighbour behaviour.
uint64_t noisy_lkup_mem_sz;

 * Configurable value of the number of random writes done in the
 * VNF simulation memory area.
uint64_t noisy_lkup_num_writes;

 * Configurable value of the number of random reads done in the
 * VNF simulation memory area.
uint64_t noisy_lkup_num_reads;

 * Configurable value of the number of random reads/writes done in the
 * VNF simulation memory area.
uint64_t noisy_lkup_num_reads_writes;

 * Receive Side Scaling (RSS) configuration.
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

 * Port topology configuration.
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

 * Avoid flushing all the RX streams before forwarding starts.
uint8_t no_flush_rx = 0; /* flush by default */

 * Flow API isolated mode.
uint8_t flow_isolate_all;

 * Avoid checking the link status when starting/stopping a port.
uint8_t no_link_check = 0; /* check by default */

 * Don't automatically start all ports in interactive mode.
uint8_t no_device_start = 0;

 * Enable link status change notification.
uint8_t lsc_interrupt = 1; /* enabled by default */

 * Enable device removal notification.
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,

 * Display or mask ethdev events.
 * Defaults to all events except VF_MBOX.
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
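/*
 * A minimal sketch of how the mask is meant to be consumed (assumed usage,
 * not code from this file): an event is printed only when its bit is set.
 *
 *     if (event_print_mask & (UINT32_C(1) << type))
 *         printf("Port %u: %s event\n", port_id, eth_event_desc[type]);
 */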
 * Decide if all memory is locked for performance.

 * NIC bypass mode configuration options.
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;

#ifdef RTE_LIBRTE_LATENCY_STATS

 * Set when latency stats are enabled on the command line.
uint8_t latencystats_enabled;

 * Lcore ID to serve latency statistics.
lcoreid_t latencystats_lcore_id = -1;

 * Ethernet device configuration.
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		/**< Default maximum frame length. */

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
		.vlan_tci_mask = 0xFFEF,
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

 * Display zero values by default for xstats.
uint8_t xstats_hide_zero;

 * Measurement of CPU cycles disabled by default.
uint8_t record_core_cycles;

 * Display of RX and TX bursts disabled by default.
uint8_t record_burst_stats;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

 * Hexadecimal bitmask of the RX mq modes that can be enabled.
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;

/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,

 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];

 * Helper function to check whether a socket has NOT been discovered yet.
 * Returns a positive value if the socket is new, zero if it is already known.
new_socket_id(unsigned int socket_id)

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)

 * Setup default configuration.
set_default_fwd_lcores_config(void)

	unsigned int sock_num;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
			socket_ids[num_sockets++] = sock_num;
		if (i == rte_get_master_lcore())
		fwd_lcores_cpuids[nb_lc++] = i;
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;

set_def_peer_eth_addrs(void)

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;

set_default_fwd_ports_config(void)

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
			socket_ids[num_sockets++] = socket_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;

set_def_fwd_config(void)
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();

/* extremely pessimistic estimation of memory required to create a mempool */
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
	*out = (size_t)total_mem;
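	/*
	 * Worked example (illustrative numbers only): for nb_mbufs = 1000,
	 * obj_sz = 2432 bytes and pgsz = 2MB, mbuf_per_pg = 2097152 / 2432
	 * = 862, so n_pages = (1000 / 862) + 1 = 2 and mbuf_mem = 4MB.
	 * Adding the pessimistic 128MB of header memory and aligning to the
	 * page size gives total_mem = 132MB.
	 */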
pagesz_flags(uint64_t page_sz)
	/* as per the mmap() manpage, a page size is encoded as its log2
	 * shifted by MAP_HUGE_SHIFT
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
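/*
 * Worked example: for 2MB hugepages, rte_log2_u64(2097152) = 21 and
 * MAP_HUGE_SHIFT is 26, so the value returned above is 21 << 26, which is
 * exactly what the mmap() manpage documents as MAP_HUGE_2MB.
 */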
alloc_mem(size_t memsz, size_t pgsz, bool huge)

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)

struct extmem_param {
	rte_iova_t *iova_table;
	unsigned int iova_table_len;

create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");

		/* lock memory if it's not huge pages */

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;

	/* if we couldn't allocate anything */

	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	munmap(addr, mem_sz);

setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
	struct extmem_param param;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
			TESTPMD_LOG(ERR, "Cannot create heap\n");

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
		TESTPMD_LOG(ERR, "Cannot create memory area\n");

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",

dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
				    "unable to DMA unmap addr 0x%p "
				    memhdr->addr, dev->data->name);

	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
			    "unable to un-register addr 0x%p\n", memhdr->addr);

dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)

	size_t page_size = sysconf(_SC_PAGESIZE);

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
			    "unable to register addr 0x%p\n", memhdr->addr);

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev *dev =
			&rte_eth_devices[pid];

		ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
				    "unable to DMA map addr 0x%p "
				    memhdr->addr, dev->data->name);

setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
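	/*
	 * Worked example (illustrative numbers only): with mbuf_sz = 2176
	 * and 64-byte cache lines, elt_size stays 2176 (already aligned).
	 * A 2MB zone then holds elt_num = 2097152 / 2176 = 963 elements, so
	 * nb_mbufs = 4096 needs zone_num = (4096 + 962) / 963 = 5 zones,
	 * the last one only partially used.
	 */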
	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
			    "external buffer descriptors\n");

	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;

		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.

		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;

	if (ext_num == 0 && xmem != NULL) {

 * Configuration initialisation done once at init time.
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);

			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);

			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);

	case MP_ALLOC_XMEM_HUGE:
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,

			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,

		rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);

 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
check_socket_id(const unsigned int socket_id)
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"

 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
get_allowed_max_nb_rxq(portid_t *pid)
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)

		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;

	return max_rxq_valid ? allowed_max_rxq : 0;

 * Check whether the given rxq is valid.
 * The value is valid if it does not exceed the maximum number of RX queues
 * of any port.
 * Return 0 if valid, -1 otherwise.
check_nb_rxq(queueid_t rxq)
	queueid_t allowed_max_rxq;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",

 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
get_allowed_max_nb_txq(portid_t *pid)
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)

		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;

	return max_txq_valid ? allowed_max_txq : 0;

 * Check whether the given txq is valid.
 * The value is valid if it does not exceed the maximum number of TX queues
 * of any port.
 * Return 0 if valid, -1 otherwise.
check_nb_txq(queueid_t txq)
	queueid_t allowed_max_txq;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",

 * Get the allowed maximum number of RXDs for every RX queue.
 * *pid returns the port id which has the minimal value of
 * max_rxd among all ports.
get_allowed_max_nb_rxd(portid_t *pid)
	uint16_t allowed_max_rxd = UINT16_MAX;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)

		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;

	return allowed_max_rxd;

 * Get the allowed minimal number of RXDs for every RX queue.
 * *pid returns the port id which has the maximal value of
 * min_rxd among all ports.
get_allowed_min_nb_rxd(portid_t *pid)
	uint16_t allowed_min_rxd = 0;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)

		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;

	return allowed_min_rxd;

 * Check whether the given rxd is valid.
 * The value is valid if it does not exceed the maximum number of RXDs and
 * is not less than the minimal number of RXDs of any RX queue.
 * Return 0 if valid, -1 otherwise.
check_nb_rxd(queueid_t rxd)
	uint16_t allowed_max_rxd;
	uint16_t allowed_min_rxd;

	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
	if (rxd > allowed_max_rxd) {
		printf("Fail: input rxd (%u) can't be greater "
		       "than max_rxds (%u) of port %u\n",

	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
	if (rxd < allowed_min_rxd) {
		printf("Fail: input rxd (%u) can't be less "
		       "than min_rxds (%u) of port %u\n",

 * Get the allowed maximum number of TXDs for every TX queue.
 * *pid returns the port id which has the minimal value of
 * max_txd among all ports.
get_allowed_max_nb_txd(portid_t *pid)
	uint16_t allowed_max_txd = UINT16_MAX;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)

		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
			allowed_max_txd = dev_info.tx_desc_lim.nb_max;

	return allowed_max_txd;

 * Get the allowed minimal number of TXDs for every TX queue.
 * *pid returns the port id which has the maximal value of
 * min_txd among all ports.
get_allowed_min_nb_txd(portid_t *pid)
	uint16_t allowed_min_txd = 0;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)

		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
			allowed_min_txd = dev_info.tx_desc_lim.nb_min;

	return allowed_min_txd;

 * Check whether the given txd is valid.
 * The value is valid if it does not exceed the maximum number of TXDs and
 * is not less than the minimal number of TXDs of any TX queue.
 * Return 0 if valid, -1 otherwise.
check_nb_txd(queueid_t txd)
	uint16_t allowed_max_txd;
	uint16_t allowed_min_txd;

	allowed_max_txd = get_allowed_max_nb_txd(&pid);
	if (txd > allowed_max_txd) {
		printf("Fail: input txd (%u) can't be greater "
		       "than max_txds (%u) of port %u\n",

	allowed_min_txd = get_allowed_min_nb_txd(&pid);
	if (txd < allowed_min_txd) {
		printf("Fail: input txd (%u) can't be less "
		       "than min_txds (%u) of port %u\n",

 * Get the allowed maximum number of hairpin queues.
 * *pid returns the port id which has the minimal value of
 * max_hairpin_queues among all ports.
get_allowed_max_nb_hairpinq(portid_t *pid)
	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
	struct rte_eth_hairpin_cap cap;

	RTE_ETH_FOREACH_DEV(pi) {
		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {

		if (cap.max_nb_queues < allowed_max_hairpinq) {
			allowed_max_hairpinq = cap.max_nb_queues;

	return allowed_max_hairpinq;

 * Check whether the given hairpinq is valid.
 * The value is valid if it does not exceed the maximum number of hairpin
 * queues of any port.
 * Return 0 if valid, -1 otherwise.
check_nb_hairpinq(queueid_t hairpinq)
	queueid_t allowed_max_hairpinq;

	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
	if (hairpinq > allowed_max_hairpinq) {
		printf("Fail: input hairpin (%u) can't be greater "
		       "than max_hairpin_queues (%u) of port %u\n",
		       hairpinq, allowed_max_hairpinq, pid);

	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
		fwd_lcores[lc_id]->cpuid_idx = lc_id;

	RTE_ETH_FOREACH_DEV(pid) {
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_info_get() failed\n");

		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		if (port_numa[pid] != NUMA_NO_CONFIG)
			port_per_socket[port_numa[pid]]++;
			uint32_t socket_id = rte_eth_dev_socket_id(pid);

			/*
			 * if socket_id is invalid,
			 * set to the first available socket.
			if (check_socket_id(socket_id) < 0)
				socket_id = socket_ids[0];
			port_per_socket[socket_id]++;

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
		port->tx_metadata = 0;

		/* Check for maximum number of segments per MTU. Accordingly
		 * update the mbuf data size.
		if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
		    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = rx_mode.max_rx_pkt_len /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;

			if ((data_size + RTE_PKTMBUF_HEADROOM) >
				mbuf_data_size = data_size +
					RTE_PKTMBUF_HEADROOM;
			TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
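		/*
		 * Worked example (illustrative values, assuming
		 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
		 * MAX_PKT_BURST = 512, mb_mempool_cache = 250 and 4 lcores):
		 * per port this gives 2048 + 4 * 250 + 2048 + 512 = 5608
		 * mbufs, multiplied by RTE_MAX_ETHPORTS for the worst case.
		 */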
		for (i = 0; i < num_sockets; i++)
			mempools[i] = mbuf_pool_create(mbuf_data_size,

		if (socket_num == UMA_NO_CONFIG)
			mempools[0] = mbuf_pool_create(mbuf_data_size,
						       nb_mbuf_per_pool, 0);
			mempools[socket_num] = mbuf_pool_create

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;

	 * Record which mbuf pool each logical core uses, if needed.
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
		fwd_lcores[lc_id]->gso_ctx.flag = 0;

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
				 "rte_gro_ctx_create() failed\n");

reconfig(portid_t new_port_id, unsigned socket_id)
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];

	ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

init_fwd_streams(void)
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);

			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];

			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
				port->socket_id = socket_num;

	q = RTE_MAX(nb_rxq, nb_txq);
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)

	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		rte_free(fwd_streams);

	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");

pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
	uint64_t total_burst, sburst;
	uint64_t burst_stats[4];
	uint16_t pktnb_stats[4];
	int burst_percent[4], sburstp;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Always show stats for burst size 0 */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];

		total_burst += nb_burst;

		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
	if (total_burst == 0)

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
			printf("%d%% of other]\n", 100 - sburstp);

		sburst += burst_stats[i];
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
			       100 - sburstp, (int) pktnb_stats[i]);

			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
		       burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];

fwd_stream_stats_display(streamid_t stream_id)
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
	       " TX-dropped: %-14"PRIu64,
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if in checksum forwarding mode, also show checksum error counters */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
		       " Rx- bad L4 checksum: %-14"PRIu64
		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);

	if (record_burst_stats) {
		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
		pkt_burst_stats_display("TX", &fs->tx_burst_stats);

fwd_stats_display(void)
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
	uint64_t fwd_cycles = 0;
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;

	memset(ports_stats, 0, sizeof(ports_stats));

	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;

		if (record_core_cycles)
			fwd_cycles += fs->core_cycles;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		rte_eth_stats_get(pt_id, &stats);
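		/*
		 * Subtract the snapshot saved by fwd_stats_reset() so the
		 * numbers below report only the traffic of the current run.
		 */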
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		if (!port->rx_queue_stats_mapping_enabled &&
		    !port->tx_queue_stats_mapping_enabled) {
			printf("  RX-packets: %-14"PRIu64
			       " RX-dropped: %-14"PRIu64
			       "RX-total: %-"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum: %-14"PRIu64
				       " Bad-l4csum: %-14"PRIu64
				       "Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if (stats.ierrors + stats.rx_nombuf > 0) {
				printf("  RX-error: %-"PRIu64"\n",
				printf("  RX-nombufs: %-14"PRIu64"\n",

			printf("  TX-packets: %-14"PRIu64
			       " TX-dropped: %-14"PRIu64
			       "TX-total: %-"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);
			printf("  RX-packets: %14"PRIu64
			       " RX-dropped:%14"PRIu64
			       " RX-total:%14"PRIu64"\n",
			       stats.ipackets, stats.imissed,
			       stats.ipackets + stats.imissed);

			if (cur_fwd_eng == &csum_fwd_engine)
				printf("  Bad-ipcsum:%14"PRIu64
				       " Bad-l4csum:%14"PRIu64
				       " Bad-outer-l4csum: %-14"PRIu64"\n",
				       ports_stats[pt_id].rx_bad_ip_csum,
				       ports_stats[pt_id].rx_bad_l4_csum,
				       ports_stats[pt_id].rx_bad_outer_l4_csum);
			if ((stats.ierrors + stats.rx_nombuf) > 0) {
				printf("  RX-error:%"PRIu64"\n", stats.ierrors);
				printf("  RX-nombufs: %14"PRIu64"\n",

			printf("  TX-packets: %14"PRIu64
			       " TX-dropped:%14"PRIu64
			       " TX-total:%14"PRIu64"\n",
			       stats.opackets, ports_stats[pt_id].tx_dropped,
			       stats.opackets + ports_stats[pt_id].tx_dropped);

		if (record_burst_stats) {
			if (ports_stats[pt_id].rx_stream)
				pkt_burst_stats_display("RX",
					&ports_stats[pt_id].rx_stream->rx_burst_stats);
			if (ports_stats[pt_id].tx_stream)
				pkt_burst_stats_display("TX",
					&ports_stats[pt_id].tx_stream->tx_burst_stats);

		if (port->rx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d RX-packets:%14"PRIu64
				       " RX-errors:%14"PRIu64
				       " RX-bytes:%14"PRIu64"\n",
				       j, stats.q_ipackets[j],
				       stats.q_errors[j], stats.q_ibytes[j]);
		if (port->tx_queue_stats_mapping_enabled) {
			for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
				printf("  Stats reg %2d TX-packets:%14"PRIu64
				       j, stats.q_opackets[j],

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);

	printf("\n  %s Accumulated forward statistics for all ports"
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       acc_stats_border, acc_stats_border);
	if (record_core_cycles) {
#define CYC_PER_MHZ 1E6
		if (total_recv > 0 || total_xmit > 0) {
			uint64_t total_pkts = 0;
			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
				total_pkts = total_xmit;
				total_pkts = total_recv;

			printf("\n  CPU cycles/packet=%.2F (total cycles="
			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
			       (double) fwd_cycles / total_pkts,
			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
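			/*
			 * Worked example (illustrative numbers only): with
			 * fwd_cycles = 2.3e9 and total_pkts = 1e8, the line
			 * reports 23.00 cycles/packet; on a 2.3 GHz TSC,
			 * rte_get_tsc_hz() / CYC_PER_MHZ prints 2300 MHz.
			 */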
fwd_stats_reset(void)
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		fs->fwd_dropped = 0;
		fs->rx_bad_ip_csum = 0;
		fs->rx_bad_l4_csum = 0;
		fs->rx_bad_outer_l4_csum = 0;

		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
		fs->core_cycles = 0;

flush_fwd_rx_queues(void)
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() keeps returning
				 * packets, so a timer is used to exit the
				 * loop after a 1-second expiry.
				prev_tsc = rte_rdtsc();
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */

run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
	struct fwd_stream **fsm;
#ifdef RTE_LIBRTE_BITRATESTATS
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATESTATS
		if (bitrate_enabled != 0 &&
		    bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
				tics_datum = tics_current;
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
		    latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
	} while (!fc->stopped);

start_pkt_forward_on_core(void *fwd_arg)
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);

 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
run_one_txonly_burst_on_core(void *fwd_arg)
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
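	/*
	 * With stopped already set, the do-while loop in
	 * run_pkt_fwd_on_lcore() runs exactly one iteration, so each stream
	 * transmits a single burst and the call returns immediately.
	 */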
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);

 * Launch packet forwarding:
 *    - Setup per-port forwarding context.
 *    - Launch logical cores with their forwarding configuration.
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
	port_fwd_begin_t port_fwd_begin;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
				printf("launch lcore %u failed - diag=%d\n",

 * Launch packet forwarding configuration.
start_packet_forwarding(int with_tx_first)
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq is 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
	if (test_done == 0) {
		printf("Packet forwarding already started\n");

		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");

		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		map_port_queue_stats_mapping_registers(pt_id, port);
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
	launch_packet_forwarding(start_pkt_forward_on_core);

stop_packet_forwarding(void)
	port_fwd_end_t port_fwd_end;

		printf("Packet forwarding not started\n");
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);

	fwd_stats_display();

	printf("\nDone.\n");

dev_set_link_up(portid_t pid)
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up failed.\n");

dev_set_link_down(portid_t pid)
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down failed.\n");

all_ports_started(void)
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))

	/* No port is stopped, so all ports are started */

port_is_stopped(portid_t port_id)
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))

all_ports_stopped(void)
	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))

port_is_started(portid_t port_id)
	if (port_id_is_invalid(port_id, ENABLED_WARN))

	if (ports[port_id].port_status != RTE_PORT_STARTED)

/* Configure the Rx and Tx hairpin queues for the selected port. */
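/*
 * Wiring example (illustrative values only): with nb_rxq = nb_txq = 2 and
 * nb_hairpinq = 1, the loops below set up TX hairpin queue 2 peered with
 * RX queue 2 (i + nb_rxq) and RX hairpin queue 2 peered with TX queue 2
 * (i + nb_txq): hairpin queues are indexed right after the regular queues
 * and are paired with each other.
 */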
2348 setup_hairpin_queues(portid_t pi)
2351 struct rte_eth_hairpin_conf hairpin_conf = {
2356 struct rte_port *port = &ports[pi];
2358 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2359 hairpin_conf.peers[0].port = pi;
2360 hairpin_conf.peers[0].queue = i + nb_rxq;
2361 diag = rte_eth_tx_hairpin_queue_setup
2362 (pi, qi, nb_txd, &hairpin_conf);
2367 /* Fail to setup rx queue, return */
2368 if (rte_atomic16_cmpset(&(port->port_status),
2370 RTE_PORT_STOPPED) == 0)
2371 printf("Port %d can not be set back "
2372 "to stopped\n", pi);
2373 printf("Fail to configure port %d hairpin "
2375 /* try to reconfigure queues next time */
2376 port->need_reconfig_queues = 1;
2379 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2380 hairpin_conf.peers[0].port = pi;
2381 hairpin_conf.peers[0].queue = i + nb_txq;
2382 diag = rte_eth_rx_hairpin_queue_setup
2383 (pi, qi, nb_rxd, &hairpin_conf);
2388 /* Fail to setup rx queue, return */
2389 if (rte_atomic16_cmpset(&(port->port_status),
2391 RTE_PORT_STOPPED) == 0)
2392 printf("Port %d can not be set back "
2393 "to stopped\n", pi);
2394 printf("Fail to configure port %d hairpin "
2396 /* try to reconfigure queues next time */
2397 port->need_reconfig_queues = 1;
2404 start_port(portid_t pid)
2406 int diag, need_check_link_status = -1;
2409 struct rte_port *port;
2410 struct rte_ether_addr mac_addr;
2411 struct rte_eth_hairpin_cap cap;
2413 if (port_id_is_invalid(pid, ENABLED_WARN))
2418 RTE_ETH_FOREACH_DEV(pi) {
2419 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2422 need_check_link_status = 0;
2424 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2425 RTE_PORT_HANDLING) == 0) {
2426 printf("Port %d is now not stopped\n", pi);
2430 if (port->need_reconfig > 0) {
2431 port->need_reconfig = 0;
2433 if (flow_isolate_all) {
2434 int ret = port_flow_isolate(pi, 1);
2436 printf("Failed to apply isolated"
2437 " mode on port %d\n", pi);
2441 configure_rxtx_dump_callbacks(0);
2442 printf("Configuring Port %d (socket %u)\n", pi,
2444 if (nb_hairpinq > 0 &&
2445 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2446 printf("Port %d doesn't support hairpin "
2450 /* configure port */
2451 diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
2452 nb_txq + nb_hairpinq,
2455 if (rte_atomic16_cmpset(&(port->port_status),
2456 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2457 printf("Port %d can not be set back "
2458 "to stopped\n", pi);
2459 printf("Fail to configure port %d\n", pi);
2460 /* try to reconfigure port next time */
2461 port->need_reconfig = 1;
2465 if (port->need_reconfig_queues > 0) {
2466 port->need_reconfig_queues = 0;
2467 /* setup tx queues */
2468 for (qi = 0; qi < nb_txq; qi++) {
2469 if ((numa_support) &&
2470 (txring_numa[pi] != NUMA_NO_CONFIG))
2471 diag = rte_eth_tx_queue_setup(pi, qi,
2472 port->nb_tx_desc[qi],
2474 &(port->tx_conf[qi]));
2476 diag = rte_eth_tx_queue_setup(pi, qi,
2477 port->nb_tx_desc[qi],
2479 &(port->tx_conf[qi]));
2484 /* Failed to set up Tx queue; revert status and return */
2485 if (rte_atomic16_cmpset(&(port->port_status),
2487 RTE_PORT_STOPPED) == 0)
2488 printf("Port %d cannot be set back "
2489 "to stopped\n", pi);
2490 printf("Failed to configure port %d Tx queues\n",
2492 /* try to reconfigure queues next time */
2493 port->need_reconfig_queues = 1;
2496 for (qi = 0; qi < nb_rxq; qi++) {
2497 /* setup rx queues */
2498 if ((numa_support) &&
2499 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2500 struct rte_mempool *mp =
2501 mbuf_pool_find(rxring_numa[pi]);
2503 printf("Failed to set up Rx queue: "
2504 "no mempool allocated "
2505 "on socket %d\n",
2510 diag = rte_eth_rx_queue_setup(pi, qi,
2511 port->nb_rx_desc[qi],
2513 &(port->rx_conf[qi]),
2516 struct rte_mempool *mp =
2517 mbuf_pool_find(port->socket_id);
2519 printf("Failed to set up Rx queue: "
2520 "no mempool allocated "
2521 "on socket %d\n",
2525 diag = rte_eth_rx_queue_setup(pi, qi,
2526 port->nb_rx_desc[qi],
2528 &(port->rx_conf[qi]),
2534 /* Failed to set up Rx queue; revert status and return */
2535 if (rte_atomic16_cmpset(&(port->port_status),
2537 RTE_PORT_STOPPED) == 0)
2538 printf("Port %d cannot be set back "
2539 "to stopped\n", pi);
2540 printf("Failed to configure port %d Rx queues\n",
2542 /* try to reconfigure queues next time */
2543 port->need_reconfig_queues = 1;
2546 /* setup hairpin queues */
2547 if (setup_hairpin_queues(pi) != 0)
2550 configure_rxtx_dump_callbacks(verbose_level);
2552 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2556 "Port %d: Failed to disable Ptype parsing\n",
2561 if (rte_eth_dev_start(pi) < 0) {
2562 printf("Failed to start port %d\n", pi);
2564 /* Failed to start the port; revert status and return */
2565 if (rte_atomic16_cmpset(&(port->port_status),
2566 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2567 printf("Port %d cannot be set back to "
2572 if (rte_atomic16_cmpset(&(port->port_status),
2573 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2574 printf("Port %d cannot be set to started\n", pi);
2576 if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
2577 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2578 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2579 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2580 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2582 /* at least one port started, need to check link status */
2583 need_check_link_status = 1;
2586 if (need_check_link_status == 1 && !no_link_check)
2587 check_all_ports_link_status(RTE_PORT_ALL);
2588 else if (need_check_link_status == 0)
2589 printf("Please stop the ports first\n");
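/*
 * Illustrative sketch, not part of testpmd: the bare ethdev bring-up
 * sequence that start_port() wraps with status tracking, hairpin and
 * NUMA handling. One Rx and one Tx queue with 512 descriptors are
 * hypothetical example parameters; a real caller would size them from
 * rte_eth_dev_info_get().
 */
static __rte_unused int
example_bring_up_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf;
	int ret;

	/* an all-zero rte_eth_conf requests the default configuration */
	memset(&conf, 0, sizeof(conf));
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
	if (ret != 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL);
	if (ret != 0)
		return ret;
	return rte_eth_dev_start(port_id);
}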
2596 stop_port(portid_t pid)
2599 struct rte_port *port;
2600 int need_check_link_status = 0;
2607 if (port_id_is_invalid(pid, ENABLED_WARN))
2610 printf("Stopping ports...\n");
2612 RTE_ETH_FOREACH_DEV(pi) {
2613 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2616 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2617 printf("Please remove port %d from forwarding configuration.\n", pi);
2621 if (port_is_bonding_slave(pi)) {
2622 printf("Please remove port %d from bonded device.\n", pi);
2627 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2628 RTE_PORT_HANDLING) == 0)
2631 rte_eth_dev_stop(pi);
2633 if (rte_atomic16_cmpset(&(port->port_status),
2634 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2635 printf("Port %d cannot be set to stopped\n", pi);
2636 need_check_link_status = 1;
2638 if (need_check_link_status && !no_link_check)
2639 check_all_ports_link_status(RTE_PORT_ALL);
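/*
 * Illustrative sketch, not part of testpmd: the compare-and-set state
 * transition used by start_port()/stop_port() above to claim a port,
 * reduced to a generic STOPPED -> HANDLING -> STARTED sequence over a
 * hypothetical stand-alone status word.
 */
static __rte_unused int
example_claim_and_start(volatile uint16_t *status)
{
	/* only one thread can move the port from STOPPED to HANDLING */
	if (rte_atomic16_cmpset(status, RTE_PORT_STOPPED,
				RTE_PORT_HANDLING) == 0)
		return -1; /* another thread is already handling it */

	/* ... the actual start work would go here ... */

	/* publish the final state */
	return rte_atomic16_cmpset(status, RTE_PORT_HANDLING,
				RTE_PORT_STARTED) ? 0 : -1;
}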
2645 remove_invalid_ports_in(portid_t *array, portid_t *total)
2648 portid_t new_total = 0;
2650 for (i = 0; i < *total; i++)
2651 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2652 array[new_total] = array[i];
2659 remove_invalid_ports(void)
2661 remove_invalid_ports_in(ports_ids, &nb_ports);
2662 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2663 nb_cfg_ports = nb_fwd_ports;
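/*
 * Illustrative sketch, not part of testpmd: the in-place "keep only the
 * valid entries" idiom that remove_invalid_ports_in() applies to the
 * port id arrays, shown with a hypothetical is_valid() predicate.
 */
static __rte_unused void
example_compact(portid_t *array, portid_t *total, int (*is_valid)(portid_t))
{
	portid_t i, new_total = 0;

	for (i = 0; i < *total; i++)
		if (is_valid(array[i]))
			array[new_total++] = array[i];
	/* entries past new_total are stale; shrink the logical size */
	*total = new_total;
}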
2667 close_port(portid_t pid)
2670 struct rte_port *port;
2672 if (port_id_is_invalid(pid, ENABLED_WARN))
2675 printf("Closing ports...\n");
2677 RTE_ETH_FOREACH_DEV(pi) {
2678 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2681 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2682 printf("Please remove port %d from forwarding configuration.\n", pi);
2686 if (port_is_bonding_slave(pi)) {
2687 printf("Please remove port %d from bonded device.\n", pi);
2692 if (rte_atomic16_cmpset(&(port->port_status),
2693 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2694 printf("Port %d is already closed\n", pi);
2698 port_flow_flush(pi);
2699 rte_eth_dev_close(pi);
2702 remove_invalid_ports();
2707 reset_port(portid_t pid)
2711 struct rte_port *port;
2713 if (port_id_is_invalid(pid, ENABLED_WARN))
2716 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2717 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2718 printf("Cannot reset port(s); please stop the port(s) first.\n");
2722 printf("Resetting ports...\n");
2724 RTE_ETH_FOREACH_DEV(pi) {
2725 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2728 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2729 printf("Please remove port %d from forwarding "
2730 "configuration.\n", pi);
2734 if (port_is_bonding_slave(pi)) {
2735 printf("Please remove port %d from bonded device.\n",
2740 diag = rte_eth_dev_reset(pi);
2743 port->need_reconfig = 1;
2744 port->need_reconfig_queues = 1;
2746 printf("Failed to reset port %d. diag=%d\n", pi, diag);
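/*
 * Illustrative sketch, not part of testpmd: the stop/reset/reconfigure
 * contract behind reset_port(). rte_eth_dev_reset() may only be called
 * on a stopped port, and the previous configuration is lost afterwards,
 * so the port must be fully reconfigured before it is started again;
 * the hypothetical example_bring_up_port() defined earlier stands in
 * for that reconfiguration.
 */
static __rte_unused int
example_reset_and_restart(uint16_t port_id, struct rte_mempool *mb_pool)
{
	int ret;

	rte_eth_dev_stop(port_id);
	ret = rte_eth_dev_reset(port_id);
	if (ret != 0)
		return ret;
	return example_bring_up_port(port_id, mb_pool);
}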
2754 attach_port(char *identifier)
2757 struct rte_dev_iterator iterator;
2759 printf("Attaching a new port...\n");
2761 if (identifier == NULL) {
2762 printf("Invalid parameters specified\n");
2766 if (rte_dev_probe(identifier) < 0) {
2767 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2771 /* first attach mode: event */
2772 if (setup_on_probe_event) {
2773 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2774 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2775 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2776 ports[pi].need_setup != 0)
2777 setup_attached_port(pi);
2781 /* second attach mode: iterator */
2782 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2783 /* setup ports matching the devargs used for probing */
2784 if (port_is_forwarding(pi))
2785 continue; /* port was already attached before */
2786 setup_attached_port(pi);
2791 setup_attached_port(portid_t pi)
2793 unsigned int socket_id;
2796 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2797 /* if socket_id is invalid, set to the first available socket. */
2798 if (check_socket_id(socket_id) < 0)
2799 socket_id = socket_ids[0];
2800 reconfig(pi, socket_id);
2801 ret = rte_eth_promiscuous_enable(pi);
2803 printf("Error enabling promiscuous mode for port %u: %s - ignoring\n",
2804 pi, rte_strerror(-ret));
2806 ports_ids[nb_ports++] = pi;
2807 fwd_ports_ids[nb_fwd_ports++] = pi;
2808 nb_cfg_ports = nb_fwd_ports;
2809 ports[pi].need_setup = 0;
2810 ports[pi].port_status = RTE_PORT_STOPPED;
2812 printf("Port %d is attached. The total number of ports is now %d\n", pi, nb_ports);
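/*
 * Illustrative sketch, not part of testpmd: probing a device from a
 * devargs string, which is the core of attach_port() before its
 * event/iterator bookkeeping. The PCI address below is a made-up
 * example identifier.
 */
static __rte_unused int
example_hotplug_attach(void)
{
	const char *devargs = "0000:03:00.0"; /* hypothetical device */

	return rte_dev_probe(devargs);
}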
2817 detach_device(struct rte_device *dev)
2822 printf("Device already removed\n");
2826 printf("Removing a device...\n");
2828 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2829 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2830 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
2831 printf("Port %u not stopped\n", sibling);
2834 port_flow_flush(sibling);
2838 if (rte_dev_remove(dev) < 0) {
2839 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2842 remove_invalid_ports();
2844 printf("Device is detached\n");
2845 printf("The total number of ports is now %d\n", nb_ports);
2851 detach_port_device(portid_t port_id)
2853 if (port_id_is_invalid(port_id, ENABLED_WARN))
2856 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2857 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2858 printf("Port not stopped\n");
2861 printf("Port was not closed\n");
2864 detach_device(rte_eth_devices[port_id].device);
2868 detach_devargs(char *identifier)
2870 struct rte_dev_iterator iterator;
2871 struct rte_devargs da;
2874 printf("Removing a device...\n");
2876 memset(&da, 0, sizeof(da));
2877 if (rte_devargs_parsef(&da, "%s", identifier)) {
2878 printf("cannot parse identifier\n");
2884 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
2885 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2886 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2887 printf("Port %u not stopped\n", port_id);
2888 rte_eth_iterator_cleanup(&iterator);
2891 port_flow_flush(port_id);
2895 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
2896 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
2897 da.name, da.bus->name);
2901 remove_invalid_ports();
2903 printf("Device %s is detached\n", identifier);
2904 printf("The total number of ports is now %d\n", nb_ports);
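/*
 * Illustrative sketch, not part of testpmd: the parse-then-remove pair
 * used by detach_devargs() above. rte_devargs_parsef() fills in the bus
 * and device names that rte_eal_hotplug_remove() needs; the ports on
 * the device should be stopped and their flows flushed first, as the
 * loop above does.
 */
static __rte_unused int
example_hotplug_detach(const char *identifier)
{
	struct rte_devargs da;

	memset(&da, 0, sizeof(da));
	if (rte_devargs_parsef(&da, "%s", identifier) != 0)
		return -1;
	return rte_eal_hotplug_remove(da.bus->name, da.name);
}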
2916 stop_packet_forwarding();
2918 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2920 if (mp_alloc_type == MP_ALLOC_ANON)
2921 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
2925 if (ports != NULL) {
2927 RTE_ETH_FOREACH_DEV(pt_id) {
2928 printf("\nStopping port %d...\n", pt_id);
2932 RTE_ETH_FOREACH_DEV(pt_id) {
2933 printf("\nShutting down port %d...\n", pt_id);
2940 ret = rte_dev_event_monitor_stop();
2943 "failed to stop the device event monitor.");
2947 ret = rte_dev_event_callback_unregister(NULL,
2948 dev_event_callback, NULL);
2951 "failed to unregister the device event callback.\n");
2955 ret = rte_dev_hotplug_handle_disable();
2958 "failed to disable hotplug handling.\n");
2962 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2964 rte_mempool_free(mempools[i]);
2967 printf("\nBye...\n");
2970 typedef void (*cmd_func_t)(void);
2971 struct pmd_test_command {
2972 const char *cmd_name;
2973 cmd_func_t cmd_func;
2976 /* Check the link status of all ports for up to 9 s, and print the final status */
2978 check_all_ports_link_status(uint32_t port_mask)
2980 #define CHECK_INTERVAL 100 /* 100ms */
2981 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2983 uint8_t count, all_ports_up, print_flag = 0;
2984 struct rte_eth_link link;
2986 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
2988 printf("Checking link statuses...\n");
2990 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2992 RTE_ETH_FOREACH_DEV(portid) {
2993 if ((port_mask & (1 << portid)) == 0)
2995 memset(&link, 0, sizeof(link));
2996 ret = rte_eth_link_get_nowait(portid, &link);
2999 if (print_flag == 1)
3000 printf("Port %u link get failed: %s\n",
3001 portid, rte_strerror(-ret));
3004 /* print link status if flag set */
3005 if (print_flag == 1) {
3006 rte_eth_link_to_str(link_status,
3007 sizeof(link_status), &link);
3008 printf("Port %d %s\n", portid, link_status);
3011 /* clear all_ports_up flag if any link down */
3012 if (link.link_status == ETH_LINK_DOWN) {
3017 /* after printing all link statuses once, get out */
3018 if (print_flag == 1)
3021 if (all_ports_up == 0) {
3023 rte_delay_ms(CHECK_INTERVAL);
3026 /* set the print_flag if all ports up or timeout */
3027 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
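/*
 * Illustrative sketch, not part of testpmd: polling a single port's link
 * until it comes up or the poll budget is spent, the per-port core of
 * check_all_ports_link_status() above. It reuses the CHECK_INTERVAL and
 * MAX_CHECK_TIME values defined there.
 */
static __rte_unused int
example_wait_link_up(uint16_t port_id)
{
	struct rte_eth_link link;
	uint8_t count;

	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		memset(&link, 0, sizeof(link));
		if (rte_eth_link_get_nowait(port_id, &link) < 0)
			return -1; /* query failed */
		if (link.link_status == ETH_LINK_UP)
			return 0;
		rte_delay_ms(CHECK_INTERVAL);
	}
	return -1; /* still down after roughly 9 s */
}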
3037 rmv_port_callback(void *arg)
3039 int need_to_start = 0;
3040 int org_no_link_check = no_link_check;
3041 portid_t port_id = (intptr_t)arg;
3042 struct rte_device *dev;
3044 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3046 if (!test_done && port_is_forwarding(port_id)) {
3048 stop_packet_forwarding();
3052 no_link_check = org_no_link_check;
3054 /* Save rte_device pointer before closing ethdev port */
3055 dev = rte_eth_devices[port_id].device;
3056 close_port(port_id);
3057 detach_device(dev); /* might be already removed or have more ports */
3060 start_packet_forwarding(0);
3063 /* This function is used by the interrupt thread */
3065 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3068 RTE_SET_USED(param);
3069 RTE_SET_USED(ret_param);
3071 if (type >= RTE_ETH_EVENT_MAX) {
3072 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3073 port_id, __func__, type);
3075 } else if (event_print_mask & (UINT32_C(1) << type)) {
3076 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3077 eth_event_desc[type]);
3082 case RTE_ETH_EVENT_NEW:
3083 ports[port_id].need_setup = 1;
3084 ports[port_id].port_status = RTE_PORT_HANDLING;
3086 case RTE_ETH_EVENT_INTR_RMV:
3087 if (port_id_is_invalid(port_id, DISABLED_WARN))
3089 if (rte_eal_alarm_set(100000,
3090 rmv_port_callback, (void *)(intptr_t)port_id))
3091 fprintf(stderr, "Could not set up deferred device removal\n");
3093 case RTE_ETH_EVENT_DESTROY:
3094 ports[port_id].port_status = RTE_PORT_CLOSED;
3095 printf("Port %u is closed\n", port_id);
3104 register_eth_event_callback(void)
3107 enum rte_eth_event_type event;
3109 for (event = RTE_ETH_EVENT_UNKNOWN;
3110 event < RTE_ETH_EVENT_MAX; event++) {
3111 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3116 TESTPMD_LOG(ERR, "Failed to register callback for "
3117 "%s event\n", eth_event_desc[event]);
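/*
 * Illustrative sketch, not part of testpmd: registering a callback for a
 * single ethdev event on all ports, the one-event version of the loop in
 * register_eth_event_callback() above. example_lsc_handler is a
 * hypothetical handler matching the rte_eth_dev_cb_fn signature.
 */
static __rte_unused int
example_lsc_handler(portid_t port_id, enum rte_eth_event_type type,
		void *param, void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);
	printf("Port %u: link state change event (%d)\n", port_id, type);
	return 0;
}

static __rte_unused int
example_register_lsc(void)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC, example_lsc_handler, NULL);
}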
3125 /* This function is used by the interrupt thread */
3127 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3128 __rte_unused void *arg)
3133 if (type >= RTE_DEV_EVENT_MAX) {
3134 fprintf(stderr, "%s called upon invalid event %d\n",
3140 case RTE_DEV_EVENT_REMOVE:
3141 RTE_LOG(DEBUG, EAL, "Device %s has been removed!\n",
3143 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3145 RTE_LOG(ERR, EAL, "cannot get port for device %s!\n",
3150 * Because the user's callback is invoked from the EAL interrupt
3151 * callback, the interrupt callback must finish before it can be
3152 * unregistered when detaching a device. So finish the callback
3153 * quickly here and defer the actual device detach with an alarm.
3154 * This is a workaround: once device detaching is moved into the
3155 * EAL in the future, the deferred removal can be dropped.
3158 if (rte_eal_alarm_set(100000,
3159 rmv_port_callback, (void *)(intptr_t)port_id))
3161 "Could not set up deferred device removal\n");
3163 case RTE_DEV_EVENT_ADD:
3164 RTE_LOG(ERR, EAL, "Device %s has been added!\n",
3166 /* TODO: once kernel driver binding is finished,
3167 * begin to attach the port.
3176 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3180 uint8_t mapping_found = 0;
3182 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3183 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3184 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
3185 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
3186 tx_queue_stats_mappings[i].queue_id,
3187 tx_queue_stats_mappings[i].stats_counter_id);
3194 port->tx_queue_stats_mapping_enabled = 1;
3199 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
3203 uint8_t mapping_found = 0;
3205 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3206 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3207 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
3208 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
3209 rx_queue_stats_mappings[i].queue_id,
3210 rx_queue_stats_mappings[i].stats_counter_id);
3217 port->rx_queue_stats_mapping_enabled = 1;
3222 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
3226 diag = set_tx_queue_stats_mapping_registers(pi, port);
3228 if (diag == -ENOTSUP) {
3229 port->tx_queue_stats_mapping_enabled = 0;
3230 printf("TX queue stats mapping not supported on port id=%d\n", pi);
3233 rte_exit(EXIT_FAILURE,
3234 "set_tx_queue_stats_mapping_registers "
3235 "failed for port id=%d diag=%d\n",
3239 diag = set_rx_queue_stats_mapping_registers(pi, port);
3241 if (diag == -ENOTSUP) {
3242 port->rx_queue_stats_mapping_enabled = 0;
3243 printf("RX queue stats mapping not supported on port id=%d\n", pi);
3246 rte_exit(EXIT_FAILURE,
3247 "set_rx_queue_stats_mapping_registers "
3248 "failed for port id=%d diag=%d\n",
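/*
 * Illustrative sketch, not part of testpmd: mapping one queue to one of
 * the limited per-queue stats counters, the primitive that the two
 * helpers above loop over. PMDs without stats mapping return -ENOTSUP,
 * which map_port_queue_stats_mapping_registers() treats as non-fatal.
 */
static __rte_unused int
example_map_txq_counter(uint16_t port_id, uint16_t queue_id,
		uint8_t counter_id)
{
	return rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
			counter_id);
}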
3254 rxtx_port_config(struct rte_port *port)
3259 for (qid = 0; qid < nb_rxq; qid++) {
3260 offloads = port->rx_conf[qid].offloads;
3261 port->rx_conf[qid] = port->dev_info.default_rxconf;
3263 port->rx_conf[qid].offloads = offloads;
3265 /* Check if any Rx parameters have been passed */
3266 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3267 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3269 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3270 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3272 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3273 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3275 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3276 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3278 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3279 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3281 port->nb_rx_desc[qid] = nb_rxd;
3284 for (qid = 0; qid < nb_txq; qid++) {
3285 offloads = port->tx_conf[qid].offloads;
3286 port->tx_conf[qid] = port->dev_info.default_txconf;
3288 port->tx_conf[qid].offloads = offloads;
3290 /* Check if any Tx parameters have been passed */
3291 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3292 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3294 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3295 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3297 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3298 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3300 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3301 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3303 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3304 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3306 port->nb_tx_desc[qid] = nb_txd;
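/*
 * Illustrative sketch, not part of testpmd: the "reset to the driver's
 * defaults but keep the configured offloads" pattern that
 * rxtx_port_config() applies to every queue, shown for one Rx queue
 * configuration.
 */
static __rte_unused void
example_refresh_rxconf(struct rte_eth_rxconf *rx_conf,
		const struct rte_eth_dev_info *dev_info)
{
	uint64_t offloads = rx_conf->offloads;

	/* take the PMD's recommended defaults... */
	*rx_conf = dev_info->default_rxconf;
	/* ...but do not lose the offloads chosen earlier */
	rx_conf->offloads = offloads;
}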
3311 init_port_config(void)
3314 struct rte_port *port;
3317 RTE_ETH_FOREACH_DEV(pid) {
3319 port->dev_conf.fdir_conf = fdir_conf;
3321 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3326 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3327 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3328 rss_hf & port->dev_info.flow_type_rss_offloads;
3330 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3331 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3334 if (port->dcb_flag == 0) {
3335 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3336 port->dev_conf.rxmode.mq_mode =
3337 (enum rte_eth_rx_mq_mode)
3338 (rx_mq_mode & ETH_MQ_RX_RSS);
3340 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3343 rxtx_port_config(port);
3345 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3349 map_port_queue_stats_mapping_registers(pid, port);
3350 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
3351 rte_pmd_ixgbe_bypass_init(pid);
3354 if (lsc_interrupt &&
3355 (rte_eth_devices[pid].data->dev_flags &
3356 RTE_ETH_DEV_INTR_LSC))
3357 port->dev_conf.intr_conf.lsc = 1;
3358 if (rmv_interrupt &&
3359 (rte_eth_devices[pid].data->dev_flags &
3360 RTE_ETH_DEV_INTR_RMV))
3361 port->dev_conf.intr_conf.rmv = 1;
3365 void set_port_slave_flag(portid_t slave_pid)
3367 struct rte_port *port;
3369 port = &ports[slave_pid];
3370 port->slave_flag = 1;
3373 void clear_port_slave_flag(portid_t slave_pid)
3375 struct rte_port *port;
3377 port = &ports[slave_pid];
3378 port->slave_flag = 0;
3381 uint8_t port_is_bonding_slave(portid_t slave_pid)
3383 struct rte_port *port;
3385 port = &ports[slave_pid];
3386 if ((rte_eth_devices[slave_pid].data->dev_flags &
3387 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3392 const uint16_t vlan_tags[] = {
3393 0, 1, 2, 3, 4, 5, 6, 7,
3394 8, 9, 10, 11, 12, 13, 14, 15,
3395 16, 17, 18, 19, 20, 21, 22, 23,
3396 24, 25, 26, 27, 28, 29, 30, 31
3400 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3401 enum dcb_mode_enable dcb_mode,
3402 enum rte_eth_nb_tcs num_tcs,
3407 struct rte_eth_rss_conf rss_conf;
3410 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3411 * given above, and the number of traffic classes available for use.
3413 if (dcb_mode == DCB_VT_ENABLED) {
3414 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3415 ð_conf->rx_adv_conf.vmdq_dcb_conf;
3416 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3417 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3419 /* VMDQ+DCB RX and TX configurations */
3420 vmdq_rx_conf->enable_default_pool = 0;
3421 vmdq_rx_conf->default_pool = 0;
3422 vmdq_rx_conf->nb_queue_pools =
3423 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3424 vmdq_tx_conf->nb_queue_pools =
3425 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3427 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3428 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3429 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3430 vmdq_rx_conf->pool_map[i].pools =
3431 1 << (i % vmdq_rx_conf->nb_queue_pools);
3433 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3434 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3435 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3438 /* set DCB mode of RX and TX of multiple queues */
3439 eth_conf->rxmode.mq_mode =
3440 (enum rte_eth_rx_mq_mode)
3441 (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3442 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3444 struct rte_eth_dcb_rx_conf *rx_conf =
3445 ð_conf->rx_adv_conf.dcb_rx_conf;
3446 struct rte_eth_dcb_tx_conf *tx_conf =
3447 ð_conf->tx_adv_conf.dcb_tx_conf;
3449 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3451 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3455 rx_conf->nb_tcs = num_tcs;
3456 tx_conf->nb_tcs = num_tcs;
3458 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3459 rx_conf->dcb_tc[i] = i % num_tcs;
3460 tx_conf->dcb_tc[i] = i % num_tcs;
3463 eth_conf->rxmode.mq_mode =
3464 (enum rte_eth_rx_mq_mode)
3465 (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3466 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3467 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3471 eth_conf->dcb_capability_en =
3472 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3474 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
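/*
 * Illustrative sketch, not part of testpmd: the round-robin mapping of
 * the eight user priorities onto the enabled traffic classes used by
 * get_eth_dcb_conf() above. With four TCs, priorities 0..7 map to TCs
 * 0,1,2,3,0,1,2,3.
 */
static __rte_unused void
example_fill_prio_to_tc(uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES],
		int num_tcs)
{
	int i;

	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
		dcb_tc[i] = i % num_tcs;
}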
3480 init_port_dcb_config(portid_t pid,
3481 enum dcb_mode_enable dcb_mode,
3482 enum rte_eth_nb_tcs num_tcs,
3485 struct rte_eth_conf port_conf;
3486 struct rte_port *rte_port;
3490 rte_port = &ports[pid];
3492 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3493 /* Enter DCB configuration status */
3496 port_conf.rxmode = rte_port->dev_conf.rxmode;
3497 port_conf.txmode = rte_port->dev_conf.txmode;
3499 /* Set configuration of DCB in VT mode and DCB in non-VT mode */
3500 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3503 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3505 /* re-configure the device */
3506 retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
3510 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3514 /* If dev_info.vmdq_pool_base is greater than 0,
3515 * the queue ids of the VMDQ pools start after the PF queues.
3517 if (dcb_mode == DCB_VT_ENABLED &&
3518 rte_port->dev_info.vmdq_pool_base > 0) {
3519 printf("VMDQ_DCB multi-queue mode is nonsensical"
3520 " for port %d.\n", pid);
3524 /* Assume the ports in testpmd have the same DCB capability
3525 * and the same number of Rx and Tx queues in DCB mode
3527 if (dcb_mode == DCB_VT_ENABLED) {
3528 if (rte_port->dev_info.max_vfs > 0) {
3529 nb_rxq = rte_port->dev_info.nb_rx_queues;
3530 nb_txq = rte_port->dev_info.nb_tx_queues;
3532 nb_rxq = rte_port->dev_info.max_rx_queues;
3533 nb_txq = rte_port->dev_info.max_tx_queues;
3536 /* if VT is disabled, use all PF queues */
3537 if (rte_port->dev_info.vmdq_pool_base == 0) {
3538 nb_rxq = rte_port->dev_info.max_rx_queues;
3539 nb_txq = rte_port->dev_info.max_tx_queues;
3541 nb_rxq = (queueid_t)num_tcs;
3542 nb_txq = (queueid_t)num_tcs;
3546 rx_free_thresh = 64;
3548 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3550 rxtx_port_config(rte_port);
3552 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3553 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3554 rx_vft_set(pid, vlan_tags[i], 1);
3556 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3560 map_port_queue_stats_mapping_registers(pid, rte_port);
3562 rte_port->dcb_flag = 1;
3570 /* Configuration of Ethernet ports. */
3571 ports = rte_zmalloc("testpmd: ports",
3572 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3573 RTE_CACHE_LINE_SIZE);
3574 if (ports == NULL) {
3575 rte_exit(EXIT_FAILURE,
3576 "rte_zmalloc(%d struct rte_port) failed\n",
3580 /* Initialize ports NUMA structures */
3581 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3582 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3583 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3597 const char clr[] = { 27, '[', '2', 'J', '\0' };
3598 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3600 /* Clear screen and move to top left */
3601 printf("%s%s", clr, top_left);
3603 printf("\nPort statistics ====================================");
3604 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3605 nic_stats_display(fwd_ports_ids[i]);
3611 signal_handler(int signum)
3613 if (signum == SIGINT || signum == SIGTERM) {
3614 printf("\nSignal %d received, preparing to exit...\n",
3616 #ifdef RTE_LIBRTE_PDUMP
3617 /* uninitialize packet capture framework */
3620 #ifdef RTE_LIBRTE_LATENCY_STATS
3621 if (latencystats_enabled != 0)
3622 rte_latencystats_uninit();
3625 /* Set flag to indicate forced termination. */
3627 /* exit with the expected status */
3628 signal(signum, SIG_DFL);
3629 kill(getpid(), signum);
3634 main(int argc, char** argv)
3641 signal(SIGINT, signal_handler);
3642 signal(SIGTERM, signal_handler);
3644 testpmd_logtype = rte_log_register("testpmd");
3645 if (testpmd_logtype < 0)
3646 rte_exit(EXIT_FAILURE, "Cannot register log type");
3647 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3649 diag = rte_eal_init(argc, argv);
3651 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3652 rte_strerror(rte_errno));
3654 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3655 rte_exit(EXIT_FAILURE,
3656 "Secondary process type not supported.\n");
3658 ret = register_eth_event_callback();
3660 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3662 #ifdef RTE_LIBRTE_PDUMP
3663 /* initialize packet capture framework */
3668 RTE_ETH_FOREACH_DEV(port_id) {
3669 ports_ids[count] = port_id;
3672 nb_ports = (portid_t) count;
3674 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3676 /* allocate port structures, and init them */
3679 set_def_fwd_config();
3681 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3682 "Check the core mask argument\n");
3684 /* Bitrate/latency stats disabled by default */
3685 #ifdef RTE_LIBRTE_BITRATESTATS
3686 bitrate_enabled = 0;
3688 #ifdef RTE_LIBRTE_LATENCY_STATS
3689 latencystats_enabled = 0;
3692 /* on FreeBSD, mlockall() is disabled by default */
3693 #ifdef RTE_EXEC_ENV_FREEBSD
3702 launch_args_parse(argc, argv);
3704 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3705 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3709 if (tx_first && interactive)
3710 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3711 "interactive mode.\n");
3713 if (tx_first && lsc_interrupt) {
3714 printf("Warning: lsc_interrupt needs to be off when "
3715 "using tx_first. Disabling.\n");
3719 if (!nb_rxq && !nb_txq)
3720 printf("Warning: Either rx or tx queues should be non-zero\n");
3722 if (nb_rxq > 1 && nb_rxq > nb_txq)
3723 printf("Warning: nb_rxq=%d enables RSS configuration, "
3724 "but nb_txq=%d will prevent it from being fully tested.\n",
3730 ret = rte_dev_hotplug_handle_enable();
3733 "failed to enable hotplug handling.");
3737 ret = rte_dev_event_monitor_start();
3740 "failed to start device event monitoring.");
3744 ret = rte_dev_event_callback_register(NULL,
3745 dev_event_callback, NULL);
3748 "failed to register the device event callback\n");
3753 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3754 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3756 /* set all ports to promiscuous mode by default */
3757 RTE_ETH_FOREACH_DEV(port_id) {
3758 ret = rte_eth_promiscuous_enable(port_id);
3760 printf("Error enabling promiscuous mode for port %u: %s - ignoring\n",
3761 port_id, rte_strerror(-ret));
3764 /* Init metrics library */
3765 rte_metrics_init(rte_socket_id());
3767 #ifdef RTE_LIBRTE_LATENCY_STATS
3768 if (latencystats_enabled != 0) {
3769 int ret = rte_latencystats_init(1, NULL);
3771 printf("Warning: latencystats init()"
3772 " returned error %d\n", ret);
3773 printf("Latencystats running on lcore %d\n",
3774 latencystats_lcore_id);
3778 /* Setup bitrate stats */
3779 #ifdef RTE_LIBRTE_BITRATESTATS
3780 if (bitrate_enabled != 0) {
3781 bitrate_data = rte_stats_bitrate_create();
3782 if (bitrate_data == NULL)
3783 rte_exit(EXIT_FAILURE,
3784 "Could not allocate bitrate data.\n");
3785 rte_stats_bitrate_reg(bitrate_data);
3789 #ifdef RTE_LIBRTE_CMDLINE
3790 if (strlen(cmdline_filename) != 0)
3791 cmdline_read_from_file(cmdline_filename);
3793 if (interactive == 1) {
3795 printf("Start automatic packet forwarding\n");
3796 start_packet_forwarding(0);
3808 printf("No command-line core given, starting packet forwarding\n");
3809 start_packet_forwarding(tx_first);
3810 if (stats_period != 0) {
3811 uint64_t prev_time = 0, cur_time, diff_time = 0;
3812 uint64_t timer_period;
3814 /* Convert to number of cycles */
3815 timer_period = stats_period * rte_get_timer_hz();
3817 while (f_quit == 0) {
3818 cur_time = rte_get_timer_cycles();
3819 diff_time += cur_time - prev_time;
3821 if (diff_time >= timer_period) {
3823 /* Reset the timer */
3826 /* Sleep to avoid unnecessary checks */
3827 prev_time = cur_time;
3832 printf("Press enter to exit\n");
3833 rc = read(0, &c, 1);
3839 ret = rte_eal_cleanup();
3841 rte_exit(EXIT_FAILURE,
3842 "EAL cleanup failed: %s\n", strerror(-ret));
3844 return EXIT_SUCCESS;