1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc_heap.h>
31 #include <rte_memory.h>
32 #include <rte_memcpy.h>
33 #include <rte_launch.h>
35 #include <rte_alarm.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_atomic.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
50 #ifdef RTE_LIBRTE_IXGBE_PMD
51 #include <rte_pmd_ixgbe.h>
53 #ifdef RTE_LIBRTE_PDUMP
54 #include <rte_pdump.h>
57 #include <rte_metrics.h>
58 #ifdef RTE_LIBRTE_BITRATE
59 #include <rte_bitrate.h>
61 #ifdef RTE_LIBRTE_LATENCY_STATS
62 #include <rte_latencystats.h>
68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69 #define HUGE_FLAG (0x40000)
71 #define HUGE_FLAG MAP_HUGETLB
74 #ifndef MAP_HUGE_SHIFT
75 /* older kernels (or FreeBSD) will not have this define */
76 #define HUGE_SHIFT (26)
78 #define HUGE_SHIFT MAP_HUGE_SHIFT
81 #define EXTMEM_HEAP_NAME "extmem"
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* use master core for command line ? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, the NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports among the CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
102 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
117 * Store specified sockets on which memory pool to be used by ports
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123 * Store specified sockets on which RX ring to be used by ports
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129 * Store specified sockets on which TX ring to be used by ports
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet address of peer target ports to which packets are
137 * Must be instantiated with the ethernet addresses of peer traffic generator
140 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
182 #if defined RTE_LIBRTE_PMD_SOFTNIC
185 #ifdef RTE_LIBRTE_IEEE1588
186 &ieee1588_fwd_engine,
191 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
192 uint16_t mempool_flags;
194 struct fwd_config cur_fwd_config;
195 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
196 uint32_t retry_enabled;
197 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
198 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
200 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
201 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
202 * specified on command-line. */
203 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
206 * In a container, a process running with the 'stats-period' option cannot be
207 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
212 * Configuration of packet segments used by the "txonly" processing engine.
214 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
215 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
216 TXONLY_DEF_PACKET_LEN,
218 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
220 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
221 /**< Split policy for packets to TX. */
223 uint8_t txonly_multi_flow;
224 /**< Whether multiple flows are generated in TXONLY mode. */
226 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
227 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
229 /* whether the current configuration is in DCB mode; 0 means it is not in DCB mode */
230 uint8_t dcb_config = 0;
232 /* Whether the dcb is in testing status */
233 uint8_t dcb_test = 0;
236 * Configurable number of RX/TX queues.
238 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
239 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
242 * Configurable number of RX/TX ring descriptors.
243 * Defaults are supplied by drivers via ethdev.
245 #define RTE_TEST_RX_DESC_DEFAULT 0
246 #define RTE_TEST_TX_DESC_DEFAULT 0
247 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
248 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
250 #define RTE_PMD_PARAM_UNSET -1
252 * Configurable values of RX and TX ring threshold registers.
255 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
256 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
257 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
259 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
260 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
261 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
264 * Configurable value of RX free threshold.
266 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
269 * Configurable value of RX drop enable.
271 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
274 * Configurable value of TX free threshold.
276 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
279 * Configurable value of TX RS bit threshold.
281 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
284 * Configurable value of buffered packets before sending.
286 uint16_t noisy_tx_sw_bufsz;
289 * Configurable value of packet buffer timeout.
291 uint16_t noisy_tx_sw_buf_flush_time;
294 * Configurable value for size of VNF internal memory area
295 * used for simulating noisy neighbour behaviour
297 uint64_t noisy_lkup_mem_sz;
300 * Configurable value of number of random writes done in
301 * VNF simulation memory area.
303 uint64_t noisy_lkup_num_writes;
306 * Configurable value of number of random reads done in
307 * VNF simulation memory area.
309 uint64_t noisy_lkup_num_reads;
312 * Configurable value of number of random reads/writes done in
313 * VNF simulation memory area.
315 uint64_t noisy_lkup_num_reads_writes;
318 * Receive Side Scaling (RSS) configuration.
320 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
323 * Port topology configuration
325 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
328 * Avoids to flush all the RX streams before starts forwarding.
330 uint8_t no_flush_rx = 0; /* flush by default */
333 * Flow API isolated mode.
335 uint8_t flow_isolate_all;
338 * Avoids to check link status when starting/stopping a port.
340 uint8_t no_link_check = 0; /* check by default */
343 * Don't automatically start all ports in interactive mode.
345 uint8_t no_device_start = 0;
348 * Enable link status change notification
350 uint8_t lsc_interrupt = 1; /* enabled by default */
353 * Enable device removal notification.
355 uint8_t rmv_interrupt = 1; /* enabled by default */
357 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
359 /* After attach, port setup is called on event or by iterator */
360 bool setup_on_probe_event = true;
362 /* Pretty printing of ethdev events */
363 static const char * const eth_event_desc[] = {
364 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
365 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
366 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
367 [RTE_ETH_EVENT_INTR_RESET] = "reset",
368 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
369 [RTE_ETH_EVENT_IPSEC] = "IPsec",
370 [RTE_ETH_EVENT_MACSEC] = "MACsec",
371 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
372 [RTE_ETH_EVENT_NEW] = "device probed",
373 [RTE_ETH_EVENT_DESTROY] = "device released",
374 [RTE_ETH_EVENT_MAX] = NULL,
378 * Display or mask ether events
379 * Default to all events except VF_MBOX
381 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
382 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
383 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
384 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
385 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
386 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
387 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
389 * Decide if all memory are locked for performance.
394 * NIC bypass mode configuration options.
397 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
398 /* The NIC bypass watchdog timeout. */
399 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
403 #ifdef RTE_LIBRTE_LATENCY_STATS
406 * Set when latency stats is enabled in the commandline
408 uint8_t latencystats_enabled;
411 * Lcore ID to serve latency statistics.
413 lcoreid_t latencystats_lcore_id = -1;
418 * Ethernet device configuration.
420 struct rte_eth_rxmode rx_mode = {
421 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
422 /**< Default maximum frame length. */
425 struct rte_eth_txmode tx_mode = {
426 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
429 struct rte_fdir_conf fdir_conf = {
430 .mode = RTE_FDIR_MODE_NONE,
431 .pballoc = RTE_FDIR_PBALLOC_64K,
432 .status = RTE_FDIR_REPORT_STATUS,
434 .vlan_tci_mask = 0xFFEF,
436 .src_ip = 0xFFFFFFFF,
437 .dst_ip = 0xFFFFFFFF,
440 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
441 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
443 .src_port_mask = 0xFFFF,
444 .dst_port_mask = 0xFFFF,
445 .mac_addr_byte_mask = 0xFF,
446 .tunnel_type_mask = 1,
447 .tunnel_id_mask = 0xFFFFFFFF,
452 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
454 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
455 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
457 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
458 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
460 uint16_t nb_tx_queue_stats_mappings = 0;
461 uint16_t nb_rx_queue_stats_mappings = 0;
464 * Display zero values by default for xstats
466 uint8_t xstats_hide_zero;
468 unsigned int num_sockets = 0;
469 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
471 #ifdef RTE_LIBRTE_BITRATE
472 /* Bitrate statistics */
473 struct rte_stats_bitrates *bitrate_data;
474 lcoreid_t bitrate_lcore_id;
475 uint8_t bitrate_enabled;
478 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
479 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
481 struct vxlan_encap_conf vxlan_encap_conf = {
485 .vni = "\x00\x00\x00",
487 .udp_dst = RTE_BE16(4789),
488 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
489 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
490 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
491 "\x00\x00\x00\x00\x00\x00\x00\x01",
492 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
493 "\x00\x00\x00\x00\x00\x00\x11\x11",
497 .eth_src = "\x00\x00\x00\x00\x00\x00",
498 .eth_dst = "\xff\xff\xff\xff\xff\xff",
501 struct nvgre_encap_conf nvgre_encap_conf = {
504 .tni = "\x00\x00\x00",
505 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
506 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
507 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
508 "\x00\x00\x00\x00\x00\x00\x00\x01",
509 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
510 "\x00\x00\x00\x00\x00\x00\x11\x11",
512 .eth_src = "\x00\x00\x00\x00\x00\x00",
513 .eth_dst = "\xff\xff\xff\xff\xff\xff",
516 /* Forward function declarations */
517 static void setup_attached_port(portid_t pi);
518 static void map_port_queue_stats_mapping_registers(portid_t pi,
519 struct rte_port *port);
520 static void check_all_ports_link_status(uint32_t port_mask);
521 static int eth_event_callback(portid_t port_id,
522 enum rte_eth_event_type type,
523 void *param, void *ret_param);
524 static void dev_event_callback(const char *device_name,
525 enum rte_dev_event_type type,
529 * Check if all the ports are started.
530 * If yes, return positive value. If not, return zero.
532 static int all_ports_started(void);
534 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
535 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
538 * Helper function to check if socket is already discovered.
539 * If yes, return positive value. If not, return zero.
/*
 * Return non-zero if @socket_id has not yet been recorded in the
 * socket_ids[] table of discovered sockets (i.e. it is new), zero otherwise.
 * NOTE(review): the return type line, braces and function tail are missing
 * from this extract; comments describe only the visible lines.
 */
542 new_socket_id(unsigned int socket_id)
/* linear scan over the sockets discovered so far */
546 for (i = 0; i < num_sockets; i++) {
547 if (socket_ids[i] == socket_id)
554 * Setup default configuration.
/*
 * Build the default forwarding-lcore list: every lcore enabled on the EAL
 * command line except the master lcore. Each lcore's socket is recorded in
 * socket_ids[]; exits the application when more than RTE_MAX_NUMA_NODES
 * distinct sockets are found. Updates nb_lcores and nb_cfg_lcores.
 */
557 set_default_fwd_lcores_config(void)
561 unsigned int sock_num;
564 for (i = 0; i < RTE_MAX_LCORE; i++) {
/* skip lcores that are not enabled in the EAL core mask */
565 if (!rte_lcore_is_enabled(i))
567 sock_num = rte_lcore_to_socket_id(i);
/* record newly discovered sockets, bounded by RTE_MAX_NUMA_NODES */
568 if (new_socket_id(sock_num)) {
569 if (num_sockets >= RTE_MAX_NUMA_NODES) {
570 rte_exit(EXIT_FAILURE,
571 "Total sockets greater than %u\n",
574 socket_ids[num_sockets++] = sock_num;
/* the master lcore runs the command line, not forwarding */
576 if (i == rte_get_master_lcore())
578 fwd_lcores_cpuids[nb_lc++] = i;
/* publish the number of usable forwarding lcores */
580 nb_lcores = (lcoreid_t) nb_lc;
581 nb_cfg_lcores = nb_lcores;
/*
 * Initialize peer_eth_addrs[] with synthetic locally-administered MAC
 * addresses: byte 0 is RTE_ETHER_LOCAL_ADMIN_ADDR and byte 5 encodes the
 * port index, giving each peer a distinct default address.
 */
586 set_def_peer_eth_addrs(void)
590 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
591 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
592 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Build the default forwarding-port list from every probed ethdev, and fold
 * each device's NUMA socket into socket_ids[] (exiting if the table would
 * exceed RTE_MAX_NUMA_NODES). Updates nb_cfg_ports and nb_fwd_ports.
 */
597 set_default_fwd_ports_config(void)
602 RTE_ETH_FOREACH_DEV(pt_id) {
603 fwd_ports_ids[i++] = pt_id;
605 /* Update sockets info according to the attached device */
606 int socket_id = rte_eth_dev_socket_id(pt_id);
/* negative socket_id means unknown NUMA affinity -- skip recording */
607 if (socket_id >= 0 && new_socket_id(socket_id)) {
608 if (num_sockets >= RTE_MAX_NUMA_NODES) {
609 rte_exit(EXIT_FAILURE,
610 "Total sockets greater than %u\n",
613 socket_ids[num_sockets++] = socket_id;
/* by default all probed ports take part in forwarding */
617 nb_cfg_ports = nb_ports;
618 nb_fwd_ports = nb_ports;
/*
 * Apply the full default forwarding configuration: lcores, peer MAC
 * addresses and ports, in that order.
 */
622 set_def_fwd_config(void)
624 set_default_fwd_lcores_config();
625 set_def_peer_eth_addrs();
626 set_default_fwd_ports_config();
629 /* extremely pessimistic estimation of memory required to create a mempool */
/*
 * Pessimistically estimate the memory needed to back a mempool of
 * @nb_mbufs objects of @mbuf_sz bytes on pages of @pgsz bytes; the
 * page-aligned total is written to *@out. A fixed 128MB is budgeted for
 * mempool header/chunk overhead since chunk placement cannot be predicted.
 * NOTE(review): some lines (return type, early-return bodies, closing
 * brace) are missing from this extract.
 */
631 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
633 unsigned int n_pages, mbuf_per_pg, leftover;
634 uint64_t total_mem, mbuf_mem, obj_sz;
636 /* there is no good way to predict how much space the mempool will
637 * occupy because it will allocate chunks on the fly, and some of those
638 * will come from default DPDK memory while some will come from our
639 * external memory, so just assume 128MB will be enough for everyone.
641 uint64_t hdr_mem = 128 << 20;
643 /* account for possible non-contiguousness */
644 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
/* an object larger than one page cannot be packed -- reject */
646 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
/* whole objects per page; round the page count up for the remainder */
650 mbuf_per_pg = pgsz / obj_sz;
651 leftover = (nb_mbufs % mbuf_per_pg) > 0;
652 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
654 mbuf_mem = n_pages * pgsz;
656 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
/* guard the 64-bit total before narrowing it to size_t */
658 if (total_mem > SIZE_MAX) {
659 TESTPMD_LOG(ERR, "Memory size too big\n");
662 *out = (size_t)total_mem;
/*
 * Translate a page size into the mmap() page-size flag bits:
 * log2(page_sz) shifted into the MAP_HUGE_SHIFT field.
 */
668 pagesz_flags(uint64_t page_sz)
670 /* as per mmap() manpage, all page sizes are log2 of page size
671 * shifted by MAP_HUGE_SHIFT
673 int log2 = rte_log2_u64(page_sz);
675 return (log2 << HUGE_SHIFT);
/*
 * mmap() @memsz bytes of anonymous private memory; when @huge is set the
 * mapping additionally requests hugepages of size @pgsz. Visible failure
 * path checks for MAP_FAILED.
 * NOTE(review): return statements are on lines missing from this extract.
 */
679 alloc_mem(size_t memsz, size_t pgsz, bool huge)
684 /* allocate anonymous hugepages */
685 flags = MAP_ANONYMOUS | MAP_PRIVATE;
687 flags |= HUGE_FLAG | pagesz_flags(pgsz);
689 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
690 if (addr == MAP_FAILED)
696 struct extmem_param {
700 rte_iova_t *iova_table;
701 unsigned int iova_table_len;
/*
 * Allocate an external memory area big enough for @nb_mbufs mbufs of
 * @mbuf_sz, trying page sizes from pgsizes[] in order, and fill @param
 * with the mapping's address, length, page size and per-page IOVA table.
 * On failure of one page size the next is tried; the visible error path
 * unmaps the area. NOTE(review): several lines (loop/branch bodies,
 * returns) are missing from this extract; comments cover visible code only.
 */
705 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
708 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
709 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
710 unsigned int cur_page, n_pages, pgsz_idx;
711 size_t mem_sz, cur_pgsz;
712 rte_iova_t *iovas = NULL;
716 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
717 /* skip anything that is too big */
718 if (pgsizes[pgsz_idx] > SIZE_MAX)
721 cur_pgsz = pgsizes[pgsz_idx];
723 /* if we were told not to allocate hugepages, override */
725 cur_pgsz = sysconf(_SC_PAGESIZE);
727 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
729 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
733 /* allocate our memory */
734 addr = alloc_mem(mem_sz, cur_pgsz, huge);
736 /* if we couldn't allocate memory with a specified page size,
737 * that doesn't mean we can't do it with other page sizes, so
743 /* store IOVA addresses for every page in this memory area */
744 n_pages = mem_sz / cur_pgsz;
746 iovas = malloc(sizeof(*iovas) * n_pages);
749 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
752 /* lock memory if it's not huge pages */
756 /* populate IOVA addresses */
757 for (cur_page = 0; cur_page < n_pages; cur_page++) {
762 offset = cur_pgsz * cur_page;
763 cur = RTE_PTR_ADD(addr, offset);
765 /* touch the page before getting its IOVA */
766 *(volatile char *)cur = 0;
768 iova = rte_mem_virt2iova(cur);
770 iovas[cur_page] = iova;
775 /* if we couldn't allocate anything */
/* success: hand the mapping's metadata back to the caller */
781 param->pgsz = cur_pgsz;
782 param->iova_table = iovas;
783 param->iova_table_len = n_pages;
/* error path: release the mapped area */
790 munmap(addr, mem_sz);
/*
 * Create (if needed) the "extmem" malloc heap, allocate an external memory
 * area sized for @nb_mbufs mbufs of @mbuf_sz via create_extmem(), and add
 * that area (with its IOVA table) to the heap so mempools can draw from it.
 * On heap-add failure the area is unmapped.
 *
 * FIX(review): lines 801 and 814 of the extract contained the mojibake
 * "¶m" -- an HTML-entity corruption ("&para;" -> '¶') of "&param" --
 * which does not compile; "&param" is restored below.
 */
796 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
798 struct extmem_param param;
801 memset(&param, 0, sizeof(param));
803 /* check if our heap exists */
804 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
806 /* create our heap */
807 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
809 TESTPMD_LOG(ERR, "Cannot create heap\n");
814 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
816 TESTPMD_LOG(ERR, "Cannot create memory area\n");
820 /* we now have a valid memory area, so add it to heap */
821 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
822 param.addr, param.len, param.iova_table,
823 param.iova_table_len, param.pgsz);
825 /* when using VFIO, memory is automatically mapped for DMA by EAL */
827 /* not needed any more */
828 free(param.iova_table);
831 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
832 munmap(param.addr, param.len);
838 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
/*
 * rte_mempool_mem_iter() callback: DMA-unmap this memory chunk from every
 * probed ethdev's underlying device, then unregister it from EAL external
 * memory. Failures are logged per device/chunk (visible format strings).
 */
844 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
845 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
850 RTE_ETH_FOREACH_DEV(pid) {
851 struct rte_eth_dev *dev =
852 &rte_eth_devices[pid];
854 ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
858 "unable to DMA unmap addr 0x%p "
860 memhdr->addr, dev->data->name);
/* after unmapping from every device, drop the EAL registration */
863 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
866 "unable to un-register addr 0x%p\n", memhdr->addr);
/*
 * rte_mempool_mem_iter() callback (inverse of dma_unmap_cb): register this
 * memory chunk with EAL as external memory, then DMA-map it for every
 * probed ethdev's underlying device. Failures are logged per device/chunk.
 */
871 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
872 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
875 size_t page_size = sysconf(_SC_PAGESIZE);
/* registration must precede per-device DMA mapping */
878 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
882 "unable to register addr 0x%p\n", memhdr->addr);
885 RTE_ETH_FOREACH_DEV(pid) {
886 struct rte_eth_dev *dev =
887 &rte_eth_devices[pid];
889 ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
893 "unable to DMA map addr 0x%p "
895 memhdr->addr, dev->data->name);
901 * Configuration initialisation done once at init time.
/*
 * Create the mbuf pool for @socket_id holding @nb_mbuf mbufs with
 * @mbuf_seg_size bytes of data room each, according to the global
 * mp_alloc_type policy (native / anon-populated / external memory).
 * Exits the application on any creation failure; dumps the pool when
 * verbose. NOTE(review): some case labels/breaks are on lines missing
 * from this extract.
 */
903 static struct rte_mempool *
904 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
905 unsigned int socket_id)
907 char pool_name[RTE_MEMPOOL_NAMESIZE];
908 struct rte_mempool *rte_mp = NULL;
/* total per-object size = mbuf header + data room */
911 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
912 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
915 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
916 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
918 switch (mp_alloc_type) {
919 case MP_ALLOC_NATIVE:
921 /* wrapper to rte_mempool_create() */
922 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
923 rte_mbuf_best_mempool_ops())
924 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
925 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
/* anon mode: empty pool first, then populate from anonymous memory */
930 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
931 mb_size, (unsigned int) mb_mempool_cache,
932 sizeof(struct rte_pktmbuf_pool_private),
933 socket_id, mempool_flags);
937 if (rte_mempool_populate_anon(rte_mp) == 0) {
938 rte_mempool_free(rte_mp);
/* initialize pool private area, each mbuf, and DMA-map every chunk */
942 rte_pktmbuf_pool_init(rte_mp, NULL);
943 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
944 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
948 case MP_ALLOC_XMEM_HUGE:
951 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
953 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
954 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
957 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
959 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
961 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
962 rte_mbuf_best_mempool_ops());
963 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
964 mb_mempool_cache, 0, mbuf_seg_size,
970 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
/* a NULL pool at this point is fatal in every allocation mode */
975 if (rte_mp == NULL) {
976 rte_exit(EXIT_FAILURE,
977 "Creation of mbuf pool for socket %u failed: %s\n",
978 socket_id, rte_strerror(rte_errno));
979 } else if (verbose_level > 0) {
980 rte_mempool_dump(stdout, rte_mp);
986 * Check given socket id is valid or not with NUMA mode,
987 * if valid, return 0, else return -1
/*
 * Validate @socket_id against the discovered sockets when NUMA mode is on.
 * For an unknown socket, a one-time warning advises manual NUMA
 * configuration. NOTE(review): return statements and the end of the
 * warning text are on lines missing from this extract.
 */
990 check_socket_id(const unsigned int socket_id)
992 static int warning_once = 0;
994 if (new_socket_id(socket_id)) {
995 if (!warning_once && numa_support)
996 printf("Warning: NUMA should be configured manually by"
997 " using --port-numa-config and"
998 " --ring-numa-config parameters along with"
1007 * Get the allowed maximum number of RX queues.
1008 * *pid return the port id which has minimal value of
1009 * max_rx_queues in all ports.
/*
 * Return the smallest max_rx_queues across all probed ports (the upper
 * bound any per-port rxq setting must respect); *pid is set to the port
 * holding that minimum. Returns 0 when no port reported its info.
 */
1012 get_allowed_max_nb_rxq(portid_t *pid)
1014 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
1015 bool max_rxq_valid = false;
1017 struct rte_eth_dev_info dev_info;
1019 RTE_ETH_FOREACH_DEV(pi) {
/* skip ports whose device info cannot be read */
1020 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1023 max_rxq_valid = true;
/* track the running minimum over all ports */
1024 if (dev_info.max_rx_queues < allowed_max_rxq) {
1025 allowed_max_rxq = dev_info.max_rx_queues;
1029 return max_rxq_valid ? allowed_max_rxq : 0;
1033 * Check input rxq is valid or not.
1034 * If input rxq is not greater than any of maximum number
1035 * of RX queues of all ports, it is valid.
1036 * if valid, return 0, else return -1
/*
 * Validate a requested per-port RX queue count against the minimum
 * max_rx_queues over all ports; prints a failure message when @rxq is too
 * large. NOTE(review): return statements are on lines missing from this
 * extract.
 */
1039 check_nb_rxq(queueid_t rxq)
1041 queueid_t allowed_max_rxq;
1044 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1045 if (rxq > allowed_max_rxq) {
1046 printf("Fail: input rxq (%u) can't be greater "
1047 "than max_rx_queues (%u) of port %u\n",
1057 * Get the allowed maximum number of TX queues.
1058 * *pid return the port id which has minimal value of
1059 * max_tx_queues in all ports.
/*
 * TX counterpart of get_allowed_max_nb_rxq(): return the smallest
 * max_tx_queues across all probed ports, with *pid set to the port
 * holding that minimum; 0 when no port reported its info.
 */
1062 get_allowed_max_nb_txq(portid_t *pid)
1064 queueid_t allowed_max_txq = MAX_QUEUE_ID;
1065 bool max_txq_valid = false;
1067 struct rte_eth_dev_info dev_info;
1069 RTE_ETH_FOREACH_DEV(pi) {
/* skip ports whose device info cannot be read */
1070 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1073 max_txq_valid = true;
/* track the running minimum over all ports */
1074 if (dev_info.max_tx_queues < allowed_max_txq) {
1075 allowed_max_txq = dev_info.max_tx_queues;
1079 return max_txq_valid ? allowed_max_txq : 0;
1083 * Check input txq is valid or not.
1084 * If input txq is not greater than any of maximum number
1085 * of TX queues of all ports, it is valid.
1086 * if valid, return 0, else return -1
/*
 * Validate a requested per-port TX queue count against the minimum
 * max_tx_queues over all ports; prints a failure message when @txq is too
 * large. NOTE(review): return statements are on lines missing from this
 * extract.
 */
1089 check_nb_txq(queueid_t txq)
1091 queueid_t allowed_max_txq;
1094 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1095 if (txq > allowed_max_txq) {
1096 printf("Fail: input txq (%u) can't be greater "
1097 "than max_tx_queues (%u) of port %u\n",
1110 struct rte_port *port;
1111 struct rte_mempool *mbp;
1112 unsigned int nb_mbuf_per_pool;
1114 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1115 struct rte_gro_param gro_param;
1122 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
1124 /* Configuration of logical cores. */
1125 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1126 sizeof(struct fwd_lcore *) * nb_lcores,
1127 RTE_CACHE_LINE_SIZE);
1128 if (fwd_lcores == NULL) {
1129 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1130 "failed\n", nb_lcores);
1132 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1133 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1134 sizeof(struct fwd_lcore),
1135 RTE_CACHE_LINE_SIZE);
1136 if (fwd_lcores[lc_id] == NULL) {
1137 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1140 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1143 RTE_ETH_FOREACH_DEV(pid) {
1145 /* Apply default TxRx configuration for all ports */
1146 port->dev_conf.txmode = tx_mode;
1147 port->dev_conf.rxmode = rx_mode;
1149 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1151 rte_exit(EXIT_FAILURE,
1152 "rte_eth_dev_info_get() failed\n");
1154 if (!(port->dev_info.tx_offload_capa &
1155 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1156 port->dev_conf.txmode.offloads &=
1157 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1158 if (!(port->dev_info.tx_offload_capa &
1159 DEV_TX_OFFLOAD_MATCH_METADATA))
1160 port->dev_conf.txmode.offloads &=
1161 ~DEV_TX_OFFLOAD_MATCH_METADATA;
1163 if (port_numa[pid] != NUMA_NO_CONFIG)
1164 port_per_socket[port_numa[pid]]++;
1166 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1169 * if socket_id is invalid,
1170 * set to the first available socket.
1172 if (check_socket_id(socket_id) < 0)
1173 socket_id = socket_ids[0];
1174 port_per_socket[socket_id]++;
1178 /* Apply Rx offloads configuration */
1179 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1180 port->rx_conf[k].offloads =
1181 port->dev_conf.rxmode.offloads;
1182 /* Apply Tx offloads configuration */
1183 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1184 port->tx_conf[k].offloads =
1185 port->dev_conf.txmode.offloads;
1187 /* set flag to initialize port/queue */
1188 port->need_reconfig = 1;
1189 port->need_reconfig_queues = 1;
1190 port->tx_metadata = 0;
1192 /* Check for maximum number of segments per MTU. Accordingly
1193 * update the mbuf data size.
1195 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1196 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1197 data_size = rx_mode.max_rx_pkt_len /
1198 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1200 if ((data_size + RTE_PKTMBUF_HEADROOM) >
1202 mbuf_data_size = data_size +
1203 RTE_PKTMBUF_HEADROOM;
1210 TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
1214 * Create pools of mbuf.
1215 * If NUMA support is disabled, create a single pool of mbuf in
1216 * socket 0 memory by default.
1217 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1219 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1220 * nb_txd can be configured at run time.
1222 if (param_total_num_mbufs)
1223 nb_mbuf_per_pool = param_total_num_mbufs;
1225 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1226 (nb_lcores * mb_mempool_cache) +
1227 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1228 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1234 for (i = 0; i < num_sockets; i++)
1235 mempools[i] = mbuf_pool_create(mbuf_data_size,
1239 if (socket_num == UMA_NO_CONFIG)
1240 mempools[0] = mbuf_pool_create(mbuf_data_size,
1241 nb_mbuf_per_pool, 0);
1243 mempools[socket_num] = mbuf_pool_create
1251 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1252 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1254 * Records which Mbuf pool to use by each logical core, if needed.
1256 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1257 mbp = mbuf_pool_find(
1258 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1261 mbp = mbuf_pool_find(0);
1262 fwd_lcores[lc_id]->mbp = mbp;
1263 /* initialize GSO context */
1264 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1265 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1266 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1267 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1269 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1272 /* Configuration of packet forwarding streams. */
1273 if (init_fwd_streams() < 0)
1274 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1278 /* create a gro context for each lcore */
1279 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1280 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1281 gro_param.max_item_per_flow = MAX_PKT_BURST;
1282 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1283 gro_param.socket_id = rte_lcore_to_socket_id(
1284 fwd_lcores_cpuids[lc_id]);
1285 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1286 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1287 rte_exit(EXIT_FAILURE,
1288 "rte_gro_ctx_create() failed\n");
1292 #if defined RTE_LIBRTE_PMD_SOFTNIC
1293 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1294 RTE_ETH_FOREACH_DEV(pid) {
1296 const char *driver = port->dev_info.driver_name;
1298 if (strcmp(driver, "net_softnic") == 0)
1299 port->softport.fwd_lcore_arg = fwd_lcores;
/*
 * Refresh an attached port's dev_info and mark it (and its queues) for
 * reconfiguration, recording the socket it should allocate from.
 * NOTE(review): the early-return on info failure is on lines missing from
 * this extract.
 */
1308 reconfig(portid_t new_port_id, unsigned socket_id)
1310 struct rte_port *port;
1313 /* Reconfiguration of Ethernet ports. */
1314 port = &ports[new_port_id];
1316 ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
1320 /* set flag to initialize port/queue */
1321 port->need_reconfig = 1;
1322 port->need_reconfig_queues = 1;
1323 port->socket_id = socket_id;
/*
 * (Re)build the fwd_streams[] array: validate nb_rxq/nb_txq against each
 * port's limits, assign each port's socket_id (from --port-numa-config,
 * device NUMA node, or --socket-num/UMA), then allocate nb_ports *
 * max(nb_rxq, nb_txq) zeroed stream objects, freeing any previous array
 * first. NOTE(review): some return statements and closing braces are on
 * lines missing from this extract.
 */
1330 init_fwd_streams(void)
1333 struct rte_port *port;
1334 streamid_t sm_id, nb_fwd_streams_new;
1337 /* set socket id according to numa or not */
1338 RTE_ETH_FOREACH_DEV(pid) {
/* per-port sanity check of the requested queue counts */
1340 if (nb_rxq > port->dev_info.max_rx_queues) {
1341 printf("Fail: nb_rxq(%d) is greater than "
1342 "max_rx_queues(%d)\n", nb_rxq,
1343 port->dev_info.max_rx_queues);
1346 if (nb_txq > port->dev_info.max_tx_queues) {
1347 printf("Fail: nb_txq(%d) is greater than "
1348 "max_tx_queues(%d)\n", nb_txq,
1349 port->dev_info.max_tx_queues);
/* explicit --port-numa-config wins; otherwise ask the device */
1353 if (port_numa[pid] != NUMA_NO_CONFIG)
1354 port->socket_id = port_numa[pid];
1356 port->socket_id = rte_eth_dev_socket_id(pid);
1359 * if socket_id is invalid,
1360 * set to the first available socket.
1362 if (check_socket_id(port->socket_id) < 0)
1363 port->socket_id = socket_ids[0];
/* UMA mode: single socket, either 0 or the user-selected one */
1367 if (socket_num == UMA_NO_CONFIG)
1368 port->socket_id = 0;
1370 port->socket_id = socket_num;
/* one stream per queue, whichever direction needs more */
1374 q = RTE_MAX(nb_rxq, nb_txq);
1376 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1379 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1380 if (nb_fwd_streams_new == nb_fwd_streams)
/* stream count changed: release the old array before reallocating */
1383 if (fwd_streams != NULL) {
1384 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1385 if (fwd_streams[sm_id] == NULL)
1387 rte_free(fwd_streams[sm_id]);
1388 fwd_streams[sm_id] = NULL;
1390 rte_free(fwd_streams);
1395 nb_fwd_streams = nb_fwd_streams_new;
1396 if (nb_fwd_streams) {
1397 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1398 sizeof(struct fwd_stream *) * nb_fwd_streams,
1399 RTE_CACHE_LINE_SIZE);
1400 if (fwd_streams == NULL)
1401 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1402 " (struct fwd_stream *)) failed\n",
1405 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1406 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1407 " struct fwd_stream", sizeof(struct fwd_stream),
1408 RTE_CACHE_LINE_SIZE);
1409 if (fwd_streams[sm_id] == NULL)
1410 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1411 "(struct fwd_stream) failed\n");
1418 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a one-line summary of the RX or TX burst-size distribution:
 * the two most frequent burst sizes (as % of all bursts) plus an
 * "others" bucket.  @rx_tx is the "RX"/"TX" label prefix.
 */
1420 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1422 unsigned int total_burst;
1423 unsigned int nb_burst;
/* [0]/[1] = top two burst counts; [2] = remainder percentage slot. */
1424 unsigned int burst_stats[3];
1425 uint16_t pktnb_stats[3];
1427 int burst_percent[3];
1430 * First compute the total number of packet bursts and the
1431 * two highest numbers of bursts of the same number of packets.
1434 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1435 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1436 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1437 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1440 total_burst += nb_burst;
/* Keep a running top-2: demote the old leader when beaten. */
1441 if (nb_burst > burst_stats[0]) {
1442 burst_stats[1] = burst_stats[0];
1443 pktnb_stats[1] = pktnb_stats[0];
1444 burst_stats[0] = nb_burst;
1445 pktnb_stats[0] = nb_pkt;
1446 } else if (nb_burst > burst_stats[1]) {
1447 burst_stats[1] = nb_burst;
1448 pktnb_stats[1] = nb_pkt;
/* Nothing received/sent yet: nothing to report. */
1451 if (total_burst == 0)
1453 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1454 printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1455 burst_percent[0], (int) pktnb_stats[0]);
/* A single burst size accounts for everything: short form. */
1456 if (burst_stats[0] == total_burst) {
1460 if (burst_stats[0] + burst_stats[1] == total_burst) {
1461 printf(" + %d%% of %d pkts]\n",
1462 100 - burst_percent[0], pktnb_stats[1]);
1465 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1466 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
/* Rounded-to-zero buckets collapse into a single "others" figure. */
1467 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1468 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1471 printf(" + %d%% of %d pkts + %d%% of others]\n",
1472 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1474 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print per-stream forwarding statistics (RX/TX packet counts, drops,
 * checksum errors in csum mode, and optional burst spread) for the
 * stream at @stream_id.  Streams with no activity print nothing.
 */
1477 fwd_stream_stats_display(streamid_t stream_id)
1479 struct fwd_stream *fs;
1480 static const char *fwd_top_stats_border = "-------";
1482 fs = fwd_streams[stream_id];
/* Skip idle streams entirely. */
1483 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1484 (fs->fwd_dropped == 0))
1486 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1487 "TX Port=%2d/Queue=%2d %s\n",
1488 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1489 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1490 printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1491 " TX-dropped: %-14"PRIu64,
1492 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1494 /* if checksum mode */
1495 if (cur_fwd_eng == &csum_fwd_engine) {
1496 printf("  RX- bad IP checksum: %-14"PRIu64
1497 " Rx- bad L4 checksum: %-14"PRIu64
1498 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1499 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1500 fs->rx_bad_outer_l4_csum);
/* Burst-size histograms are only compiled in with the record option. */
1505 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1506 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1507 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Print the full forwarding-statistics report: per-stream stats (when
 * multiple streams share a port), per-port deltas since the last
 * fwd_stats_reset(), optional per-queue stats-register mappings, and
 * the accumulated totals across all forwarding ports.
 * Port counters are displayed as (current ethdev stats) minus the
 * snapshot stored in ports[].stats at forwarding start.
 */
1512 fwd_stats_display(void)
1514 static const char *fwd_stats_border = "----------------------";
1515 static const char *acc_stats_border = "+++++++++++++++";
/* Per-port aggregation of stream counters (struct declared just above
 * this elided span — only its tail fields are visible here). */
1517 struct fwd_stream *rx_stream;
1518 struct fwd_stream *tx_stream;
1519 uint64_t tx_dropped;
1520 uint64_t rx_bad_ip_csum;
1521 uint64_t rx_bad_l4_csum;
1522 uint64_t rx_bad_outer_l4_csum;
1523 } ports_stats[RTE_MAX_ETHPORTS];
1524 uint64_t total_rx_dropped = 0;
1525 uint64_t total_tx_dropped = 0;
1526 uint64_t total_rx_nombuf = 0;
1527 struct rte_eth_stats stats;
1528 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1529 uint64_t fwd_cycles = 0;
1531 uint64_t total_recv = 0;
1532 uint64_t total_xmit = 0;
1533 struct rte_port *port;
1538 memset(ports_stats, 0, sizeof(ports_stats));
/* Pass 1: fold every stream's counters into its RX/TX port buckets. */
1540 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1541 struct fwd_stream *fs = fwd_streams[sm_id];
/* With more streams than ports, show the per-stream breakdown too. */
1543 if (cur_fwd_config.nb_fwd_streams >
1544 cur_fwd_config.nb_fwd_ports) {
1545 fwd_stream_stats_display(sm_id);
1547 ports_stats[fs->tx_port].tx_stream = fs;
1548 ports_stats[fs->rx_port].rx_stream = fs;
1551 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1553 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1554 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1555 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1556 fs->rx_bad_outer_l4_csum;
1558 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1559 fwd_cycles += fs->core_cycles;
/* Pass 2: per-port report. */
1562 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1565 pt_id = fwd_ports_ids[i];
1566 port = &ports[pt_id];
1568 rte_eth_stats_get(pt_id, &stats);
/* Subtract the snapshot taken at forwarding start so only traffic
 * from this run is reported. */
1569 stats.ipackets -= port->stats.ipackets;
1570 stats.opackets -= port->stats.opackets;
1571 stats.ibytes -= port->stats.ibytes;
1572 stats.obytes -= port->stats.obytes;
1573 stats.imissed -= port->stats.imissed;
1574 stats.oerrors -= port->stats.oerrors;
1575 stats.rx_nombuf -= port->stats.rx_nombuf;
1577 total_recv += stats.ipackets;
1578 total_xmit += stats.opackets;
1579 total_rx_dropped += stats.imissed;
1580 total_tx_dropped += ports_stats[pt_id].tx_dropped;
1581 total_tx_dropped += stats.oerrors;
1582 total_rx_nombuf += stats.rx_nombuf;
1584 printf("\n  %s Forward statistics for port %-2d %s\n",
1585 fwd_stats_border, pt_id, fwd_stats_border);
/* Two formats: left-aligned when queue-stats mapping is off,
 * right-aligned columns when it is on. */
1587 if (!port->rx_queue_stats_mapping_enabled &&
1588 !port->tx_queue_stats_mapping_enabled) {
1589 printf("  RX-packets: %-14"PRIu64
1590 " RX-dropped: %-14"PRIu64
1591 "RX-total: %-"PRIu64"\n",
1592 stats.ipackets, stats.imissed,
1593 stats.ipackets + stats.imissed);
1595 if (cur_fwd_eng == &csum_fwd_engine)
1596 printf("  Bad-ipcsum: %-14"PRIu64
1597 " Bad-l4csum: %-14"PRIu64
1598 "Bad-outer-l4csum: %-14"PRIu64"\n",
1599 ports_stats[pt_id].rx_bad_ip_csum,
1600 ports_stats[pt_id].rx_bad_l4_csum,
1601 ports_stats[pt_id].rx_bad_outer_l4_csum);
1602 if (stats.ierrors + stats.rx_nombuf > 0) {
1603 printf("  RX-error: %-"PRIu64"\n",
1605 printf("  RX-nombufs: %-14"PRIu64"\n",
1609 printf("  TX-packets: %-14"PRIu64
1610 " TX-dropped: %-14"PRIu64
1611 "TX-total: %-"PRIu64"\n",
1612 stats.opackets, ports_stats[pt_id].tx_dropped,
1613 stats.opackets + ports_stats[pt_id].tx_dropped);
1615 printf("  RX-packets:             %14"PRIu64
1616 "    RX-dropped:%14"PRIu64
1617 "    RX-total:%14"PRIu64"\n",
1618 stats.ipackets, stats.imissed,
1619 stats.ipackets + stats.imissed);
1621 if (cur_fwd_eng == &csum_fwd_engine)
1622 printf("  Bad-ipcsum:%14"PRIu64
1623 "    Bad-l4csum:%14"PRIu64
1624 "    Bad-outer-l4csum: %-14"PRIu64"\n",
1625 ports_stats[pt_id].rx_bad_ip_csum,
1626 ports_stats[pt_id].rx_bad_l4_csum,
1627 ports_stats[pt_id].rx_bad_outer_l4_csum);
1628 if ((stats.ierrors + stats.rx_nombuf) > 0) {
1629 printf("  RX-error:%"PRIu64"\n", stats.ierrors);
1630 printf("  RX-nombufs:             %14"PRIu64"\n",
1634 printf("  TX-packets:             %14"PRIu64
1635 "    TX-dropped:%14"PRIu64
1636 "    TX-total:%14"PRIu64"\n",
1637 stats.opackets, ports_stats[pt_id].tx_dropped,
1638 stats.opackets + ports_stats[pt_id].tx_dropped);
1641 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1642 if (ports_stats[pt_id].rx_stream)
1643 pkt_burst_stats_display("RX",
1644 &ports_stats[pt_id].rx_stream->rx_burst_stats);
1645 if (ports_stats[pt_id].tx_stream)
1646 pkt_burst_stats_display("TX",
1647 &ports_stats[pt_id].tx_stream->tx_burst_stats);
/* Per-queue stats registers, when mapping has been configured. */
1650 if (port->rx_queue_stats_mapping_enabled) {
1652 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1653 printf("  Stats reg %2d RX-packets:%14"PRIu64
1654 "     RX-errors:%14"PRIu64
1655 "    RX-bytes:%14"PRIu64"\n",
1656 j, stats.q_ipackets[j],
1657 stats.q_errors[j], stats.q_ibytes[j]);
1661 if (port->tx_queue_stats_mapping_enabled) {
1662 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1663 printf("  Stats reg %2d TX-packets:%14"PRIu64
1666 j, stats.q_opackets[j],
1671 printf("  %s--------------------------------%s\n",
1672 fwd_stats_border, fwd_stats_border);
/* Grand totals across all forwarding ports. */
1675 printf("\n  %s Accumulated forward statistics for all ports"
1677 acc_stats_border, acc_stats_border);
1678 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1680 "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1682 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1683 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1684 if (total_rx_nombuf > 0)
1685 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1686 printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1688 acc_stats_border, acc_stats_border);
1689 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* NOTE(review): the guard against total_recv == 0 before this
 * division is elided from this listing — confirm it exists. */
1691 printf("\n  CPU cycles/packet=%u (total cycles="
1692 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1693 (unsigned int)(fwd_cycles / total_recv),
1694 fwd_cycles, total_recv);
/*
 * Reset forwarding statistics: snapshot current ethdev counters into
 * ports[].stats (so fwd_stats_display() can report deltas) and clear
 * all per-stream counters and optional burst/cycle records.
 */
1699 fwd_stats_reset(void)
1705 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1706 pt_id = fwd_ports_ids[i];
/* Baseline snapshot; later displays subtract this. */
1707 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
1709 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1710 struct fwd_stream *fs = fwd_streams[sm_id];
1714 fs->fwd_dropped = 0;
1715 fs->rx_bad_ip_csum = 0;
1716 fs->rx_bad_l4_csum = 0;
1717 fs->rx_bad_outer_l4_csum = 0;
1719 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1720 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
1721 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
1723 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1724 fs->core_cycles = 0;
/*
 * Drain all RX queues of all forwarding ports before a test starts,
 * freeing every received mbuf.  Two passes with a 10 ms pause cover
 * packets in flight; a 1 s timer per queue guards against a queue
 * that never stops delivering packets.
 */
1730 flush_fwd_rx_queues(void)
1732 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1739 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1740 uint64_t timer_period;
1742 /* convert to number of cycles */
1743 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1745 for (j = 0; j < 2; j++) {
1746 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1747 for (rxq = 0; rxq < nb_rxq; rxq++) {
1748 port_id = fwd_ports_ids[rxp];
1750 * testpmd can stuck in the below do while loop
1751 * if rte_eth_rx_burst() always returns nonzero
1752 * packets. So timer is added to exit this loop
1753 * after 1sec timer expiry.
1755 prev_tsc = rte_rdtsc();
1757 nb_rx = rte_eth_rx_burst(port_id, rxq,
1758 pkts_burst, MAX_PKT_BURST);
1759 for (i = 0; i < nb_rx; i++)
1760 rte_pktmbuf_free(pkts_burst[i]);
1762 cur_tsc = rte_rdtsc();
1763 diff_tsc = cur_tsc - prev_tsc;
1764 timer_tsc += diff_tsc;
/* Stop when the queue is empty or the 1 s budget is spent. */
1765 } while ((nb_rx > 0) &&
1766 (timer_tsc < timer_period));
1770 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main forwarding loop for one lcore: repeatedly invoke @pkt_fwd on
 * each stream assigned to forwarding context @fc until fc->stopped is
 * set.  Also drives the optional periodic bitrate calculation and
 * latency-stats update when this lcore is the designated one.
 */
1775 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1777 struct fwd_stream **fsm;
1780 #ifdef RTE_LIBRTE_BITRATE
1781 uint64_t tics_per_1sec;
1782 uint64_t tics_datum;
1783 uint64_t tics_current;
1784 uint16_t i, cnt_ports;
1786 cnt_ports = nb_ports;
1787 tics_datum = rte_rdtsc();
1788 tics_per_1sec = rte_get_timer_hz();
/* This lcore services the stream slice [stream_idx, stream_idx+nb). */
1790 fsm = &fwd_streams[fc->stream_idx];
1791 nb_fs = fc->stream_nb;
1793 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1794 (*pkt_fwd)(fsm[sm_id]);
1795 #ifdef RTE_LIBRTE_BITRATE
/* Once per second, on the bitrate lcore only, refresh bitrate stats. */
1796 if (bitrate_enabled != 0 &&
1797 bitrate_lcore_id == rte_lcore_id()) {
1798 tics_current = rte_rdtsc();
1799 if (tics_current - tics_datum >= tics_per_1sec) {
1800 /* Periodic bitrate calculation */
1801 for (i = 0; i < cnt_ports; i++)
1802 rte_stats_bitrate_calc(bitrate_data,
1804 tics_datum = tics_current;
1808 #ifdef RTE_LIBRTE_LATENCY_STATS
1809 if (latencystats_enabled != 0 &&
1810 latencystats_lcore_id == rte_lcore_id())
1811 rte_latencystats_update();
1814 } while (! fc->stopped);
/*
 * lcore entry point (rte_eal_remote_launch callback): run the current
 * forwarding engine's packet_fwd loop on this core's streams.
 */
1818 start_pkt_forward_on_core(void *fwd_arg)
1820 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1821 cur_fwd_config.fwd_eng->packet_fwd);
1826 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1827 * Used to start communication flows in network loopback test configurations.
1830 run_one_txonly_burst_on_core(void *fwd_arg)
1832 struct fwd_lcore *fwd_lc;
1833 struct fwd_lcore tmp_lcore;
1835 fwd_lc = (struct fwd_lcore *) fwd_arg;
/* Work on a copy with stopped=1 so run_pkt_fwd_on_lcore's do/while
 * executes exactly one iteration (one burst) and returns. */
1836 tmp_lcore = *fwd_lc;
1837 tmp_lcore.stopped = 1;
1838 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1843 * Launch packet forwarding:
1844 * - Setup per-port forwarding context.
1845 * - launch logical cores with their forwarding configuration.
1848 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1850 port_fwd_begin_t port_fwd_begin;
/* Give the engine a chance to do per-port setup before launch. */
1855 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1856 if (port_fwd_begin != NULL) {
1857 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1858 (*port_fwd_begin)(fwd_ports_ids[i]);
1860 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1861 lc_id = fwd_lcores_cpuids[i];
/* In interactive mode the master lcore stays on the CLI, so it is
 * never remote-launched onto itself. */
1862 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1863 fwd_lcores[i]->stopped = 0;
1864 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1865 fwd_lcores[i], lc_id);
1867 printf("launch lcore %u failed - diag=%d\n",
1874 * Launch packet forwarding configuration.
/*
 * Validate the current forwarding configuration (queue counts, port
 * state, DCB constraints), flush stale RX packets, display the config,
 * optionally run a one-shot tx_first burst, then launch the forwarding
 * loop on all configured lcores.  @with_tx_first is the number of
 * initial TXONLY bursts to send before normal forwarding begins.
 */
1877 start_packet_forwarding(int with_tx_first)
1879 port_fwd_begin_t port_fwd_begin;
1880 port_fwd_end_t port_fwd_end;
1881 struct rte_port *port;
/* An engine needing a direction must have queues in that direction. */
1885 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1886 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1888 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1889 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1891 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1892 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1893 (!nb_rxq || !nb_txq))
1894 rte_exit(EXIT_FAILURE,
1895 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1896 cur_fwd_eng->fwd_mode_name);
1898 if (all_ports_started() == 0) {
1899 printf("Not all ports were started\n");
1902 if (test_done == 0) {
1903 printf("Packet forwarding already started\n");
/* DCB mode: every forwarding port must be DCB-configured and more
 * than one forwarding core is required (guard condition elided). */
1909 for (i = 0; i < nb_fwd_ports; i++) {
1910 pt_id = fwd_ports_ids[i];
1911 port = &ports[pt_id];
1912 if (!port->dcb_flag) {
1913 printf("In DCB mode, all forwarding ports must "
1914 "be configured in this mode.\n");
1918 if (nb_fwd_lcores == 1) {
1919 printf("In DCB mode,the nb forwarding cores "
1920 "should be larger than 1.\n");
/* Discard packets left over from a previous run. */
1929 flush_fwd_rx_queues();
1931 pkt_fwd_config_display(&cur_fwd_config);
1932 rxtx_config_display();
1935 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1936 pt_id = fwd_ports_ids[i];
1937 port = &ports[pt_id];
1938 map_port_queue_stats_mapping_registers(pt_id, port);
/* tx_first: run the TXONLY engine for @with_tx_first single bursts
 * to prime loopback flows before normal forwarding starts. */
1940 if (with_tx_first) {
1941 port_fwd_begin = tx_only_engine.port_fwd_begin;
1942 if (port_fwd_begin != NULL) {
1943 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1944 (*port_fwd_begin)(fwd_ports_ids[i]);
1946 while (with_tx_first--) {
1947 launch_packet_forwarding(
1948 run_one_txonly_burst_on_core);
1949 rte_eal_mp_wait_lcore();
1951 port_fwd_end = tx_only_engine.port_fwd_end;
1952 if (port_fwd_end != NULL) {
1953 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1954 (*port_fwd_end)(fwd_ports_ids[i]);
1957 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop packet forwarding: signal every forwarding lcore to stop, wait
 * for all of them to return, run the engine's per-port teardown hook,
 * and display the final statistics.
 */
1961 stop_packet_forwarding(void)
1963 port_fwd_end_t port_fwd_end;
1969 printf("Packet forwarding not started\n");
1972 printf("Telling cores to stop...");
/* Each loop in run_pkt_fwd_on_lcore polls this flag and exits. */
1973 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1974 fwd_lcores[lc_id]->stopped = 1;
1975 printf("\nWaiting for lcores to finish...\n");
1976 rte_eal_mp_wait_lcore();
1977 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1978 if (port_fwd_end != NULL) {
1979 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1980 pt_id = fwd_ports_ids[i];
1981 (*port_fwd_end)(pt_id);
1985 fwd_stats_display();
1987 printf("\nDone.\n");
/* Bring the link of port @pid up; failure is reported, not fatal. */
1992 dev_set_link_up(portid_t pid)
1994 if (rte_eth_dev_set_link_up(pid) < 0)
1995 printf("\nSet link up fail.\n");
/* Bring the link of port @pid down; failure is reported, not fatal. */
1999 dev_set_link_down(portid_t pid)
2001 if (rte_eth_dev_set_link_down(pid) < 0)
2002 printf("\nSet link down fail.\n");
/*
 * Return non-zero when every non-slave port is in RTE_PORT_STARTED
 * state; bonding slaves are exempt from the check.
 */
2006 all_ports_started(void)
2009 struct rte_port *port;
2011 RTE_ETH_FOREACH_DEV(pi) {
2013 /* Check if there is a port which is not started */
2014 if ((port->port_status != RTE_PORT_STARTED) &&
2015 (port->slave_flag == 0))
2019 /* No port is not started */
/*
 * Return non-zero when @port_id is stopped.  A bonding slave is always
 * treated as stopped regardless of its own status.
 */
2024 port_is_stopped(portid_t port_id)
2026 struct rte_port *port = &ports[port_id];
2028 if ((port->port_status != RTE_PORT_STOPPED) &&
2029 (port->slave_flag == 0))
/* Return non-zero when every port reports stopped (see port_is_stopped). */
2035 all_ports_stopped(void)
2039 RTE_ETH_FOREACH_DEV(pi) {
2040 if (!port_is_stopped(pi))
/* Return non-zero when @port_id is valid and in RTE_PORT_STARTED state. */
2048 port_is_started(portid_t port_id)
2050 if (port_id_is_invalid(port_id, ENABLED_WARN))
2053 if (ports[port_id].port_status != RTE_PORT_STARTED)
/*
 * Start port @pid (or all ports when pid == RTE_PORT_ALL):
 *  - transition port_status STOPPED -> HANDLING via atomic cmpset so
 *    concurrent starts cannot race;
 *  - if need_reconfig is set, re-run rte_eth_dev_configure() (plus
 *    optional flow-isolation);
 *  - if need_reconfig_queues is set, re-create all TX and RX queues,
 *    honoring per-ring NUMA overrides and mbuf-pool placement;
 *  - call rte_eth_dev_start(), mark the port STARTED, print its MAC,
 *    and finally check link status unless disabled.
 * On any failure the port is rolled back to STOPPED and the relevant
 * need_reconfig* flag is re-armed for a later retry.
 */
2060 start_port(portid_t pid)
2062 int diag, need_check_link_status = -1;
2065 struct rte_port *port;
2066 struct rte_ether_addr mac_addr;
2068 if (port_id_is_invalid(pid, ENABLED_WARN))
2073 RTE_ETH_FOREACH_DEV(pi) {
2074 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2077 need_check_link_status = 0;
/* Claim the port: only a STOPPED port may be started. */
2079 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2080 RTE_PORT_HANDLING) == 0) {
2081 printf("Port %d is now not stopped\n", pi);
2085 if (port->need_reconfig > 0) {
2086 port->need_reconfig = 0;
2088 if (flow_isolate_all) {
2089 int ret = port_flow_isolate(pi, 1);
2091 printf("Failed to apply isolated"
2092 " mode on port %d\n", pi);
2096 configure_rxtx_dump_callbacks(0);
2097 printf("Configuring Port %d (socket %u)\n", pi,
2099 /* configure port */
2100 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* Configure failed: roll back to STOPPED and retry next time. */
2103 if (rte_atomic16_cmpset(&(port->port_status),
2104 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2105 printf("Port %d can not be set back "
2106 "to stopped\n", pi);
2107 printf("Fail to configure port %d\n", pi);
2108 /* try to reconfigure port next time */
2109 port->need_reconfig = 1;
2113 if (port->need_reconfig_queues > 0) {
2114 port->need_reconfig_queues = 0;
2115 /* setup tx queues */
2116 for (qi = 0; qi < nb_txq; qi++) {
/* Per-ring NUMA override takes precedence over port socket. */
2117 if ((numa_support) &&
2118 (txring_numa[pi] != NUMA_NO_CONFIG))
2119 diag = rte_eth_tx_queue_setup(pi, qi,
2120 port->nb_tx_desc[qi],
2122 &(port->tx_conf[qi]));
2124 diag = rte_eth_tx_queue_setup(pi, qi,
2125 port->nb_tx_desc[qi],
2127 &(port->tx_conf[qi]));
2132 /* Fail to setup tx queue, return */
2133 if (rte_atomic16_cmpset(&(port->port_status),
2135 RTE_PORT_STOPPED) == 0)
2136 printf("Port %d can not be set back "
2137 "to stopped\n", pi);
2138 printf("Fail to configure port %d tx queues\n",
2140 /* try to reconfigure queues next time */
2141 port->need_reconfig_queues = 1;
2144 for (qi = 0; qi < nb_rxq; qi++) {
2145 /* setup rx queues */
2146 if ((numa_support) &&
2147 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
/* The RX queue needs an mbuf pool on its NUMA node. */
2148 struct rte_mempool * mp =
2149 mbuf_pool_find(rxring_numa[pi]);
2151 printf("Failed to setup RX queue:"
2152 "No mempool allocation"
2153 " on the socket %d\n",
2158 diag = rte_eth_rx_queue_setup(pi, qi,
2159 port->nb_rx_desc[qi],
2161 &(port->rx_conf[qi]),
2164 struct rte_mempool *mp =
2165 mbuf_pool_find(port->socket_id);
2167 printf("Failed to setup RX queue:"
2168 "No mempool allocation"
2169 " on the socket %d\n",
2173 diag = rte_eth_rx_queue_setup(pi, qi,
2174 port->nb_rx_desc[qi],
2176 &(port->rx_conf[qi]),
2182 /* Fail to setup rx queue, return */
2183 if (rte_atomic16_cmpset(&(port->port_status),
2185 RTE_PORT_STOPPED) == 0)
2186 printf("Port %d can not be set back "
2187 "to stopped\n", pi);
2188 printf("Fail to configure port %d rx queues\n",
2190 /* try to reconfigure queues next time */
2191 port->need_reconfig_queues = 1;
2195 configure_rxtx_dump_callbacks(verbose_level);
2197 if (rte_eth_dev_start(pi) < 0) {
2198 printf("Fail to start port %d\n", pi);
2200 /* Fail to setup rx queue, return */
2201 if (rte_atomic16_cmpset(&(port->port_status),
2202 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2203 printf("Port %d can not be set back to "
2208 if (rte_atomic16_cmpset(&(port->port_status),
2209 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2210 printf("Port %d can not be set into started\n", pi);
2212 rte_eth_macaddr_get(pi, &mac_addr);
2213 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2214 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2215 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2216 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2218 /* at least one port started, need checking link status */
2219 need_check_link_status = 1;
/* -1 means no port matched @pid at all; 0 means a matching port
 * was found but could not be started (still not stopped). */
2222 if (need_check_link_status == 1 && !no_link_check)
2223 check_all_ports_link_status(RTE_PORT_ALL);
2224 else if (need_check_link_status == 0)
2225 printf("Please stop the ports first\n");
/*
 * Stop port @pid (or all ports when pid == RTE_PORT_ALL).  Refuses
 * ports still used by an active forwarding config or enslaved to a
 * bonded device.  Uses STARTED -> HANDLING -> STOPPED atomic state
 * transitions around rte_eth_dev_stop().
 */
2232 stop_port(portid_t pid)
2235 struct rte_port *port;
2236 int need_check_link_status = 0;
2243 if (port_id_is_invalid(pid, ENABLED_WARN))
2246 printf("Stopping ports...\n");
2248 RTE_ETH_FOREACH_DEV(pi) {
2249 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2252 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2253 printf("Please remove port %d from forwarding configuration.\n", pi);
2257 if (port_is_bonding_slave(pi)) {
2258 printf("Please remove port %d from bonded device.\n", pi);
/* Only a STARTED port can be stopped; skip others silently. */
2263 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2264 RTE_PORT_HANDLING) == 0)
2267 rte_eth_dev_stop(pi);
2269 if (rte_atomic16_cmpset(&(port->port_status),
2270 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2271 printf("Port %d can not be set into stopped\n", pi);
2272 need_check_link_status = 1;
2274 if (need_check_link_status && !no_link_check)
2275 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Compact @array in place, dropping entries whose port id is no longer
 * valid, and write the new element count back through @total.
 */
2281 remove_invalid_ports_in(portid_t *array, portid_t *total)
2284 portid_t new_total = 0;
2286 for (i = 0; i < *total; i++)
2287 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2288 array[new_total] = array[i];
/*
 * Purge detached/invalid ports from both the global port list and the
 * forwarding port list, keeping nb_cfg_ports in sync.
 */
2295 remove_invalid_ports(void)
2297 remove_invalid_ports_in(ports_ids, &nb_ports);
2298 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2299 nb_cfg_ports = nb_fwd_ports;
/*
 * Close port @pid (or all ports when pid == RTE_PORT_ALL): flush its
 * flow rules, call rte_eth_dev_close(), drop it from the port lists,
 * and move port_status STOPPED -> HANDLING -> CLOSED.  Forwarding
 * ports and bonding slaves are refused.
 */
2303 close_port(portid_t pid)
2306 struct rte_port *port;
2308 if (port_id_is_invalid(pid, ENABLED_WARN))
2311 printf("Closing ports...\n");
2313 RTE_ETH_FOREACH_DEV(pi) {
2314 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2317 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2318 printf("Please remove port %d from forwarding configuration.\n", pi);
2322 if (port_is_bonding_slave(pi)) {
2323 printf("Please remove port %d from bonded device.\n", pi);
/* CLOSED -> CLOSED cmpset succeeding means it was already closed. */
2328 if (rte_atomic16_cmpset(&(port->port_status),
2329 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2330 printf("Port %d is already closed\n", pi);
2334 if (rte_atomic16_cmpset(&(port->port_status),
2335 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2336 printf("Port %d is now not stopped\n", pi);
/* Flow rules must be flushed before the device is closed. */
2340 if (port->flow_list)
2341 port_flow_flush(pi);
2342 rte_eth_dev_close(pi);
2344 remove_invalid_ports();
2346 if (rte_atomic16_cmpset(&(port->port_status),
2347 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2348 printf("Port %d cannot be set to closed\n", pi);
/*
 * Reset port @pid (or all ports when pid == RTE_PORT_ALL) via
 * rte_eth_dev_reset().  On success the port is flagged for full
 * reconfiguration; forwarding ports and bonding slaves are refused.
 */
2355 reset_port(portid_t pid)
2359 struct rte_port *port;
2361 if (port_id_is_invalid(pid, ENABLED_WARN))
2364 printf("Resetting ports...\n");
2366 RTE_ETH_FOREACH_DEV(pi) {
2367 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2370 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2371 printf("Please remove port %d from forwarding "
2372 "configuration.\n", pi);
2376 if (port_is_bonding_slave(pi)) {
2377 printf("Please remove port %d from bonded device.\n",
2382 diag = rte_eth_dev_reset(pi);
/* After a hardware reset the port must be fully reconfigured. */
2385 port->need_reconfig = 1;
2386 port->need_reconfig_queues = 1;
2388 printf("Failed to reset port %d. diag=%d\n", pi, diag);
/*
 * Hot-plug a new device described by devargs string @identifier.
 * After probing, newly created ports are set up either via the
 * RTE_ETH_EVENT_NEW handler (setup_on_probe_event) or by iterating
 * the ports matching the probe devargs.
 */
2396 attach_port(char *identifier)
2399 struct rte_dev_iterator iterator;
2401 printf("Attaching a new port...\n");
2403 if (identifier == NULL) {
2404 printf("Invalid parameters are specified\n");
2408 if (rte_dev_probe(identifier) < 0) {
2409 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2413 /* first attach mode: event */
2414 if (setup_on_probe_event) {
2415 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2416 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2417 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2418 ports[pi].need_setup != 0)
2419 setup_attached_port(pi);
2423 /* second attach mode: iterator */
2424 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2425 /* setup ports matching the devargs used for probing */
2426 if (port_is_forwarding(pi))
2427 continue; /* port was already attached before */
2428 setup_attached_port(pi);
/*
 * Finish initialization of a freshly attached port @pi: resolve its
 * NUMA socket, run reconfig(), enable promiscuous mode (best effort),
 * append it to the port/forwarding lists, and mark it STOPPED.
 */
2433 setup_attached_port(portid_t pi)
2435 unsigned int socket_id;
2438 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2439 /* if socket_id is invalid, set to the first available socket. */
2440 if (check_socket_id(socket_id) < 0)
2441 socket_id = socket_ids[0];
2442 reconfig(pi, socket_id);
2443 ret = rte_eth_promiscuous_enable(pi);
2445 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
2446 pi, rte_strerror(-ret));
2448 ports_ids[nb_ports++] = pi;
2449 fwd_ports_ids[nb_fwd_ports++] = pi;
2450 nb_cfg_ports = nb_fwd_ports;
2451 ports[pi].need_setup = 0;
2452 ports[pi].port_status = RTE_PORT_STOPPED;
2454 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Hot-unplug the rte_device backing port @port_id.  The port must be
 * stopped; if it was not closed, its flow rules are flushed first.
 * All sibling ports on the same device are force-closed and their
 * device pointers cleared, then the port lists are compacted.
 */
2459 detach_port_device(portid_t port_id)
2461 struct rte_device *dev;
2464 printf("Removing a device...\n");
2466 dev = rte_eth_devices[port_id].device;
2468 printf("Device already removed\n");
2472 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2473 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2474 printf("Port not stopped\n");
2477 printf("Port was not closed\n");
2478 if (ports[port_id].flow_list)
2479 port_flow_flush(port_id);
2482 if (rte_dev_remove(dev) < 0) {
2483 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2486 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2487 /* reset mapping between old ports and removed device */
2488 rte_eth_devices[sibling].device = NULL;
2489 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2490 /* sibling ports are forced to be closed */
2491 ports[sibling].port_status = RTE_PORT_CLOSED;
2492 printf("Port %u is closed\n", sibling);
2496 remove_invalid_ports();
2498 printf("Device of port %u is detached\n", port_id);
2499 printf("Now total ports is %d\n", nb_ports);
/*
 * Hot-unplug a device identified by devargs string @identifier.
 * Every matching port must be stopped; each has its flow rules
 * flushed and is force-closed before rte_eal_hotplug_remove() is
 * called on the parsed bus/device name.
 */
2505 detach_device(char *identifier)
2507 struct rte_dev_iterator iterator;
2508 struct rte_devargs da;
2511 printf("Removing a device...\n");
2513 memset(&da, 0, sizeof(da));
2514 if (rte_devargs_parsef(&da, "%s", identifier)) {
2515 printf("cannot parse identifier\n");
2521 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
2522 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2523 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2524 printf("Port %u not stopped\n", port_id);
2528 /* sibling ports are forced to be closed */
2529 if (ports[port_id].flow_list)
2530 port_flow_flush(port_id);
2531 ports[port_id].port_status = RTE_PORT_CLOSED;
2532 printf("Port %u is now closed\n", port_id);
2536 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
2537 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
2538 da.name, da.bus->name);
2542 remove_invalid_ports();
2544 printf("Device %s is detached\n", identifier);
2545 printf("Now total ports is %d\n", nb_ports);
/*
 * NOTE(review): this span is the tail of the application shutdown
 * routine (presumably pmd_test_exit); its function header is elided
 * from this listing.  It stops forwarding, DMA-unmaps anonymous
 * mempools, stops/closes every port, tears down device-event
 * monitoring, and frees the per-socket mempools.
 */
2557 stop_packet_forwarding();
2559 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
/* Anonymous-memory pools were DMA-mapped at setup; undo it here. */
2561 if (mp_alloc_type == MP_ALLOC_ANON)
2562 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
2566 if (ports != NULL) {
2568 RTE_ETH_FOREACH_DEV(pt_id) {
2569 printf("\nStopping port %d...\n", pt_id);
2573 RTE_ETH_FOREACH_DEV(pt_id) {
2574 printf("\nShutting down port %d...\n", pt_id);
/* Tear down hot-plug event handling in reverse setup order. */
2581 ret = rte_dev_event_monitor_stop();
2584 "fail to stop device event monitor.");
2588 ret = rte_dev_event_callback_unregister(NULL,
2589 dev_event_callback, NULL);
2592 "fail to unregister device event callback.\n");
2596 ret = rte_dev_hotplug_handle_disable();
2599 "fail to disable hotplug handling.\n");
2603 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2605 rte_mempool_free(mempools[i]);
2608 printf("\nBye...\n");
/* Simple name -> handler mapping for built-in test commands; the
 * pmd_test_menu[] table itself is elided from this listing. */
2611 typedef void (*cmd_func_t)(void);
2612 struct pmd_test_command {
2613 const char *cmd_name;
2614 cmd_func_t cmd_func;
2617 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2619 /* Check the link status of all ports in up to 9s, and print them finally */
2621 check_all_ports_link_status(uint32_t port_mask)
2623 #define CHECK_INTERVAL 100 /* 100ms */
2624 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2626 uint8_t count, all_ports_up, print_flag = 0;
2627 struct rte_eth_link link;
2629 printf("Checking link statuses...\n");
/* Poll every CHECK_INTERVAL ms until all masked ports are up or the
 * 9 s budget expires; the final iteration prints per-port status. */
2631 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2633 RTE_ETH_FOREACH_DEV(portid) {
2634 if ((port_mask & (1 << portid)) == 0)
2636 memset(&link, 0, sizeof(link));
2637 rte_eth_link_get_nowait(portid, &link);
2638 /* print link status if flag set */
2639 if (print_flag == 1) {
2640 if (link.link_status)
2642 "Port%d Link Up. speed %u Mbps- %s\n",
2643 portid, link.link_speed,
2644 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2645 ("full-duplex") : ("half-duplex\n"));
2647 printf("Port %d Link Down\n", portid);
2650 /* clear all_ports_up flag if any link down */
2651 if (link.link_status == ETH_LINK_DOWN) {
2656 /* after finally printing all link status, get out */
2657 if (print_flag == 1)
2660 if (all_ports_up == 0) {
2662 rte_delay_ms(CHECK_INTERVAL);
2665 /* set the print_flag if all ports up or timeout */
2666 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2676 * This callback is for remove a port for a device. It has limitation because
2677 * it is not for multiple port removal for a device.
2678 * TODO: the device detach invoke will plan to be removed from user side to
2679 * eal. And convert all PMDs to free port resources on ether device closing.
/*
 * Deferred-removal alarm handler (scheduled by eth/dev event
 * callbacks): stop forwarding if this port participates in it, close
 * and detach the port with link checking temporarily disabled, then
 * restart forwarding if it had been running.
 */
2682 rmv_port_callback(void *arg)
2684 int need_to_start = 0;
2685 int org_no_link_check = no_link_check;
2686 portid_t port_id = (intptr_t)arg;
2688 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2690 if (!test_done && port_is_forwarding(port_id)) {
2692 stop_packet_forwarding();
/* Skip link checks while the device is disappearing. */
2696 no_link_check = org_no_link_check;
2697 close_port(port_id);
2698 detach_port_device(port_id);
2700 start_packet_forwarding(0);
2703 /* This function is used by the interrupt thread */
2705 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2708 RTE_SET_USED(param);
2709 RTE_SET_USED(ret_param);
2711 if (type >= RTE_ETH_EVENT_MAX) {
2712 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2713 port_id, __func__, type);
2715 } else if (event_print_mask & (UINT32_C(1) << type)) {
2716 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2717 eth_event_desc[type]);
2722 case RTE_ETH_EVENT_NEW:
2723 ports[port_id].need_setup = 1;
2724 ports[port_id].port_status = RTE_PORT_HANDLING;
2726 case RTE_ETH_EVENT_INTR_RMV:
2727 if (port_id_is_invalid(port_id, DISABLED_WARN))
2729 if (rte_eal_alarm_set(100000,
2730 rmv_port_callback, (void *)(intptr_t)port_id))
2731 fprintf(stderr, "Could not set up deferred device removal\n");
/*
 * Register eth_event_callback() for every ethdev event type on all
 * ports; logs and returns an error if any registration fails.
 */
2740 register_eth_event_callback(void)
2743 enum rte_eth_event_type event;
2745 for (event = RTE_ETH_EVENT_UNKNOWN;
2746 event < RTE_ETH_EVENT_MAX; event++) {
2747 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
2752 TESTPMD_LOG(ERR, "Failed to register callback for "
2753 "%s event\n", eth_event_desc[event]);
2761 /* This function is used by the interrupt thread */
/*
 * Bus-level device event handler.  On REMOVE it resolves the port
 * from the device name and schedules a deferred rmv_port_callback()
 * (detaching inside this callback would deadlock the EAL interrupt
 * handling).  ADD is logged only; attach is left to the user.
 */
2763 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2764 __rte_unused void *arg)
2769 if (type >= RTE_DEV_EVENT_MAX) {
2770 fprintf(stderr, "%s called upon invalid event %d\n",
2776 case RTE_DEV_EVENT_REMOVE:
2777 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
2779 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2781 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
2786 * Because the user's callback is invoked in eal interrupt
2787 * callback, the interrupt callback need to be finished before
2788 * it can be unregistered when detaching device. So finish
2789 * callback soon and use a deferred removal to detach device
2790 * is need. It is a workaround, once the device detaching be
2791 * moved into the eal in the future, the deferred removal could
2794 if (rte_eal_alarm_set(100000,
2795 rmv_port_callback, (void *)(intptr_t)port_id))
2797 "Could not set up deferred device removal\n");
2799 case RTE_DEV_EVENT_ADD:
2800 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2802 /* TODO: After finish kernel driver binding,
2803 * begin to attach port.
/*
 * Program the user-supplied Tx queue -> stats-counter mappings for
 * port_id into the device, and flag the port when at least one
 * mapping was applied.
 */
2812 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2816 uint8_t mapping_found = 0;
/* Apply only mappings for this port whose queue id is in range. */
2818 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2819 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2820 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2821 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2822 tx_queue_stats_mappings[i].queue_id,
2823 tx_queue_stats_mappings[i].stats_counter_id);
/* Remember that per-queue Tx stats are available for display. */
2830 port->tx_queue_stats_mapping_enabled = 1;
/*
 * Rx counterpart of set_tx_queue_stats_mapping_registers(): program
 * the user-supplied Rx queue -> stats-counter mappings for port_id,
 * and flag the port when at least one mapping was applied.
 */
2835 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2839 uint8_t mapping_found = 0;
/* Apply only mappings for this port whose queue id is in range. */
2841 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2842 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2843 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2844 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2845 rx_queue_stats_mappings[i].queue_id,
2846 rx_queue_stats_mappings[i].stats_counter_id);
/* Remember that per-queue Rx stats are available for display. */
2853 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both Tx and Rx queue-stats mappings on port pi. -ENOTSUP from
 * the PMD is tolerated (the feature is disabled and reported); any
 * other failure aborts testpmd via rte_exit().
 */
2858 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2862 diag = set_tx_queue_stats_mapping_registers(pi, port);
/* Unsupported is not fatal: just disable the Tx-side feature. */
2864 if (diag == -ENOTSUP) {
2865 port->tx_queue_stats_mapping_enabled = 0;
2866 printf("TX queue stats mapping not supported port id=%d\n", pi);
2869 rte_exit(EXIT_FAILURE,
2870 "set_tx_queue_stats_mapping_registers "
2871 "failed for port id=%d diag=%d\n",
2875 diag = set_rx_queue_stats_mapping_registers(pi, port);
/* Same tolerance for the Rx side. */
2877 if (diag == -ENOTSUP) {
2878 port->rx_queue_stats_mapping_enabled = 0;
2879 printf("RX queue stats mapping not supported port id=%d\n", pi);
2882 rte_exit(EXIT_FAILURE,
2883 "set_rx_queue_stats_mapping_registers "
2884 "failed for port id=%d diag=%d\n",
/*
 * Load the PMD's default per-queue Rx/Tx configuration into the port,
 * preserving the offloads already chosen, then override individual
 * threshold/descriptor knobs with any user-provided values
 * (RTE_PMD_PARAM_UNSET means "keep the PMD default").
 */
2890 rxtx_port_config(struct rte_port *port)
2895 for (qid = 0; qid < nb_rxq; qid++) {
/* Keep the current offloads across the copy of default_rxconf. */
2896 offloads = port->rx_conf[qid].offloads;
2897 port->rx_conf[qid] = port->dev_info.default_rxconf;
2899 port->rx_conf[qid].offloads = offloads;
2901 /* Check if any Rx parameters have been passed */
2902 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2903 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2905 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2906 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2908 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2909 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2911 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2912 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2914 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2915 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2917 port->nb_rx_desc[qid] = nb_rxd;
/* Same pattern for Tx: defaults first, offloads kept, then overrides. */
2920 for (qid = 0; qid < nb_txq; qid++) {
2921 offloads = port->tx_conf[qid].offloads;
2922 port->tx_conf[qid] = port->dev_info.default_txconf;
2924 port->tx_conf[qid].offloads = offloads;
2926 /* Check if any Tx parameters have been passed */
2927 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2928 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2930 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2931 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2933 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2934 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2936 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2937 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2939 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2940 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2942 port->nb_tx_desc[qid] = nb_txd;
/*
 * Initialize per-port configuration for every probed device: flow
 * director config, RSS hash types (intersected with what the PMD
 * offers), default Rx multi-queue mode, per-queue Rx/Tx setup, MAC
 * address, queue-stats mappings, and optional LSC/RMV interrupts.
 */
2947 init_port_config(void)
2950 struct rte_port *port;
2953 RTE_ETH_FOREACH_DEV(pid) {
2955 port->dev_conf.fdir_conf = fdir_conf;
2957 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
/* Restrict the requested RSS hash types to what the PMD supports. */
2962 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2963 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2964 rss_hf & port->dev_info.flow_type_rss_offloads;
/* Otherwise disable RSS hashing entirely. */
2966 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2967 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* Without DCB, pick plain RSS or no multi-queue at all. */
2970 if (port->dcb_flag == 0) {
2971 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2972 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2974 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2977 rxtx_port_config(port);
2979 rte_eth_macaddr_get(pid, &port->eth_addr);
2981 map_port_queue_stats_mapping_registers(pid, port);
2982 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2983 rte_pmd_ixgbe_bypass_init(pid);
/* Enable link-status / removal interrupts only when both requested
 * by the user and advertised in the device flags.
 */
2986 if (lsc_interrupt &&
2987 (rte_eth_devices[pid].data->dev_flags &
2988 RTE_ETH_DEV_INTR_LSC))
2989 port->dev_conf.intr_conf.lsc = 1;
2990 if (rmv_interrupt &&
2991 (rte_eth_devices[pid].data->dev_flags &
2992 RTE_ETH_DEV_INTR_RMV))
2993 port->dev_conf.intr_conf.rmv = 1;
2997 void set_port_slave_flag(portid_t slave_pid)
2999 struct rte_port *port;
3001 port = &ports[slave_pid];
3002 port->slave_flag = 1;
3005 void clear_port_slave_flag(portid_t slave_pid)
3007 struct rte_port *port;
3009 port = &ports[slave_pid];
3010 port->slave_flag = 0;
/*
 * Predicate: a port counts as a bonding slave when either the ethdev
 * layer flags it (RTE_ETH_DEV_BONDED_SLAVE) or testpmd marked it via
 * set_port_slave_flag().
 */
3013 uint8_t port_is_bonding_slave(portid_t slave_pid)
3015 struct rte_port *port;
3017 port = &ports[slave_pid];
3018 if ((rte_eth_devices[slave_pid].data->dev_flags &
3019 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
/* VLAN ids (0..31) used to populate the VMDQ+DCB pool maps and the
 * per-port VLAN filters during DCB setup.
 */
3024 const uint16_t vlan_tags[] = {
3025 0, 1, 2, 3, 4, 5, 6, 7,
3026 8, 9, 10, 11, 12, 13, 14, 15,
3027 16, 17, 18, 19, 20, 21, 22, 23,
3028 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Build the DCB configuration for port pid into eth_conf. dcb_mode
 * selects VMDQ+DCB (vt enabled) vs plain DCB(+RSS); num_tcs is the
 * number of traffic classes; pfc_en toggles the PFC capability bit.
 *
 * Fix: "ð_conf" was a mis-encoded "&eth_conf" (HTML entity "&eth;"
 * decoded to U+00F0); the four corrupted address-of expressions are
 * restored below.
 */
3032 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3033 enum dcb_mode_enable dcb_mode,
3034 enum rte_eth_nb_tcs num_tcs,
3039 struct rte_eth_rss_conf rss_conf;
3042 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3043 * given above, and the number of traffic classes available for use.
3045 if (dcb_mode == DCB_VT_ENABLED) {
3046 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3047 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3048 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3049 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3051 /* VMDQ+DCB RX and TX configurations */
3052 vmdq_rx_conf->enable_default_pool = 0;
3053 vmdq_rx_conf->default_pool = 0;
/* 4 TCs -> 32 pools, otherwise 16 pools, on both Rx and Tx. */
3054 vmdq_rx_conf->nb_queue_pools =
3055 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3056 vmdq_tx_conf->nb_queue_pools =
3057 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* One pool map per pool: vlan_tags[i] steers to pool (i mod pools). */
3059 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3060 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3061 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3062 vmdq_rx_conf->pool_map[i].pools =
3063 1 << (i % vmdq_rx_conf->nb_queue_pools);
/* Spread user priorities round-robin across the traffic classes. */
3065 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3066 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3067 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3070 /* set DCB mode of RX and TX of multiple queues */
3071 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
3072 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3074 struct rte_eth_dcb_rx_conf *rx_conf =
3075 &eth_conf->rx_adv_conf.dcb_rx_conf;
3076 struct rte_eth_dcb_tx_conf *tx_conf =
3077 &eth_conf->tx_adv_conf.dcb_tx_conf;
/* Preserve the device's current RSS hash configuration. */
3079 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3083 rx_conf->nb_tcs = num_tcs;
3084 tx_conf->nb_tcs = num_tcs;
/* Map user priorities to traffic classes round-robin here too. */
3086 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3087 rx_conf->dcb_tc[i] = i % num_tcs;
3088 tx_conf->dcb_tc[i] = i % num_tcs;
/* Non-vt mode: DCB combined with RSS on Rx, plain DCB on Tx. */
3091 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
3092 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3093 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
/* Advertise PFC capability only when pfc_en is set. */
3097 eth_conf->dcb_capability_en =
3098 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3100 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * Reconfigure port pid for DCB operation: build the DCB rx/tx modes
 * via get_eth_dcb_conf(), re-run rte_eth_dev_configure(), size the
 * global queue counts for the chosen mode, enable VLAN filtering and
 * install the vlan_tags filters, then mark the port as DCB-enabled.
 */
3106 init_port_dcb_config(portid_t pid,
3107 enum dcb_mode_enable dcb_mode,
3108 enum rte_eth_nb_tcs num_tcs,
3111 struct rte_eth_conf port_conf;
3112 struct rte_port *rte_port;
3116 rte_port = &ports[pid];
3118 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3119 /* Enter DCB configuration status */
/* Start from the port's current rx/tx mode settings. */
3122 port_conf.rxmode = rte_port->dev_conf.rxmode;
3123 port_conf.txmode = rte_port->dev_conf.txmode;
3125 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
3126 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3129 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3131 /* re-configure the device . */
/* NOTE(review): nb_rxq is passed for both the Rx and Tx queue counts;
 * DCB setup appears to assume nb_rxq == nb_txq - confirm intended.
 */
3132 retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
3136 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3140 /* If dev_info.vmdq_pool_base is greater than 0,
3141 * the queue id of vmdq pools is started after pf queues.
3143 if (dcb_mode == DCB_VT_ENABLED &&
3144 rte_port->dev_info.vmdq_pool_base > 0) {
3145 printf("VMDQ_DCB multi-queue mode is nonsensical"
3146 " for port %d.", pid);
3150 /* Assume the ports in testpmd have the same dcb capability
3151 * and has the same number of rxq and txq in dcb mode
3153 if (dcb_mode == DCB_VT_ENABLED) {
/* When VFs exist, use the currently configured queue counts;
 * otherwise the device maximum.
 */
3154 if (rte_port->dev_info.max_vfs > 0) {
3155 nb_rxq = rte_port->dev_info.nb_rx_queues;
3156 nb_txq = rte_port->dev_info.nb_tx_queues;
3158 nb_rxq = rte_port->dev_info.max_rx_queues;
3159 nb_txq = rte_port->dev_info.max_tx_queues;
3162 /*if vt is disabled, use all pf queues */
3163 if (rte_port->dev_info.vmdq_pool_base == 0) {
3164 nb_rxq = rte_port->dev_info.max_rx_queues;
3165 nb_txq = rte_port->dev_info.max_tx_queues;
/* Otherwise one queue per traffic class. */
3167 nb_rxq = (queueid_t)num_tcs;
3168 nb_txq = (queueid_t)num_tcs;
3172 rx_free_thresh = 64;
/* Persist the new configuration before per-queue setup. */
3174 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3176 rxtx_port_config(rte_port);
/* VLAN filtering is required for DCB; accept all tags used above. */
3178 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3179 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3180 rx_vft_set(pid, vlan_tags[i], 1);
3182 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
3183 map_port_queue_stats_mapping_registers(pid, rte_port);
3185 rte_port->dcb_flag = 1;
/* Fragment of the port-array initialisation routine; its signature is
 * outside this excerpt. Allocates the global ports array and resets
 * the per-port NUMA configuration tables.
 */
3193 /* Configuration of Ethernet ports. */
/* Zeroed array of RTE_MAX_ETHPORTS rte_port slots, cache-line aligned. */
3194 ports = rte_zmalloc("testpmd: ports",
3195 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3196 RTE_CACHE_LINE_SIZE);
3197 if (ports == NULL) {
3198 rte_exit(EXIT_FAILURE,
3199 "rte_zmalloc(%d struct rte_port) failed\n",
3203 /* Initialize ports NUMA structures */
/* NUMA_NO_CONFIG presumably means "no explicit socket chosen" for the
 * port/rxring/txring entries - confirm against its definition.
 */
3204 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3205 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3206 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
/* Fragment of the statistics display routine; its signature is outside
 * this excerpt. Clears the terminal, then prints per-port NIC stats
 * for every forwarding port.
 */
/* ANSI escape sequences: ESC[2J clears, ESC[1;1H homes the cursor. */
3220 const char clr[] = { 27, '[', '2', 'J', '\0' };
3221 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3223 /* Clear screen and move to top left */
3224 printf("%s%s", clr, top_left);
3226 printf("\nPort statistics ====================================");
/* One stats block per forwarding port. */
3227 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3228 nic_stats_display(fwd_ports_ids[i]);
/*
 * SIGINT/SIGTERM handler: tear down optional subsystems (packet
 * capture, latency stats), then re-raise the signal with the default
 * disposition so the process exits with the conventional status.
 */
3234 signal_handler(int signum)
3236 if (signum == SIGINT || signum == SIGTERM) {
3237 printf("\nSignal %d received, preparing to exit...\n",
3239 #ifdef RTE_LIBRTE_PDUMP
3240 /* uninitialize packet capture framework */
3243 #ifdef RTE_LIBRTE_LATENCY_STATS
3244 if (latencystats_enabled != 0)
3245 rte_latencystats_uninit();
3248 /* Set flag to indicate the force termination. */
3250 /* exit with the expected status */
/* Restore default handling and re-deliver the same signal to self. */
3251 signal(signum, SIG_DFL);
3252 kill(getpid(), signum);
3257 main(int argc, char** argv)
3264 signal(SIGINT, signal_handler);
3265 signal(SIGTERM, signal_handler);
3267 testpmd_logtype = rte_log_register("testpmd");
3268 if (testpmd_logtype < 0)
3269 rte_exit(EXIT_FAILURE, "Cannot register log type");
3270 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3272 diag = rte_eal_init(argc, argv);
3274 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3275 rte_strerror(rte_errno));
3277 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
3278 rte_exit(EXIT_FAILURE,
3279 "Secondary process type not supported.\n");
3281 ret = register_eth_event_callback();
3283 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
3285 #ifdef RTE_LIBRTE_PDUMP
3286 /* initialize packet capture framework */
3291 RTE_ETH_FOREACH_DEV(port_id) {
3292 ports_ids[count] = port_id;
3295 nb_ports = (portid_t) count;
3297 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3299 /* allocate port structures, and init them */
3302 set_def_fwd_config();
3304 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3305 "Check the core mask argument\n");
3307 /* Bitrate/latency stats disabled by default */
3308 #ifdef RTE_LIBRTE_BITRATE
3309 bitrate_enabled = 0;
3311 #ifdef RTE_LIBRTE_LATENCY_STATS
3312 latencystats_enabled = 0;
3315 /* on FreeBSD, mlockall() is disabled by default */
3316 #ifdef RTE_EXEC_ENV_FREEBSD
3325 launch_args_parse(argc, argv);
3327 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3328 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3332 if (tx_first && interactive)
3333 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3334 "interactive mode.\n");
3336 if (tx_first && lsc_interrupt) {
3337 printf("Warning: lsc_interrupt needs to be off when "
3338 " using tx_first. Disabling.\n");
3342 if (!nb_rxq && !nb_txq)
3343 printf("Warning: Either rx or tx queues should be non-zero\n");
3345 if (nb_rxq > 1 && nb_rxq > nb_txq)
3346 printf("Warning: nb_rxq=%d enables RSS configuration, "
3347 "but nb_txq=%d will prevent to fully test it.\n",
3353 ret = rte_dev_hotplug_handle_enable();
3356 "fail to enable hotplug handling.");
3360 ret = rte_dev_event_monitor_start();
3363 "fail to start device event monitoring.");
3367 ret = rte_dev_event_callback_register(NULL,
3368 dev_event_callback, NULL);
3371 "fail to register device event callback\n");
3376 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3377 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3379 /* set all ports to promiscuous mode by default */
3380 RTE_ETH_FOREACH_DEV(port_id) {
3381 ret = rte_eth_promiscuous_enable(port_id);
3383 printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
3384 port_id, rte_strerror(-ret));
3387 /* Init metrics library */
3388 rte_metrics_init(rte_socket_id());
3390 #ifdef RTE_LIBRTE_LATENCY_STATS
3391 if (latencystats_enabled != 0) {
3392 int ret = rte_latencystats_init(1, NULL);
3394 printf("Warning: latencystats init()"
3395 " returned error %d\n", ret);
3396 printf("Latencystats running on lcore %d\n",
3397 latencystats_lcore_id);
3401 /* Setup bitrate stats */
3402 #ifdef RTE_LIBRTE_BITRATE
3403 if (bitrate_enabled != 0) {
3404 bitrate_data = rte_stats_bitrate_create();
3405 if (bitrate_data == NULL)
3406 rte_exit(EXIT_FAILURE,
3407 "Could not allocate bitrate data.\n");
3408 rte_stats_bitrate_reg(bitrate_data);
3412 #ifdef RTE_LIBRTE_CMDLINE
3413 if (strlen(cmdline_filename) != 0)
3414 cmdline_read_from_file(cmdline_filename);
3416 if (interactive == 1) {
3418 printf("Start automatic packet forwarding\n");
3419 start_packet_forwarding(0);
3431 printf("No commandline core given, start packet forwarding\n");
3432 start_packet_forwarding(tx_first);
3433 if (stats_period != 0) {
3434 uint64_t prev_time = 0, cur_time, diff_time = 0;
3435 uint64_t timer_period;
3437 /* Convert to number of cycles */
3438 timer_period = stats_period * rte_get_timer_hz();
3440 while (f_quit == 0) {
3441 cur_time = rte_get_timer_cycles();
3442 diff_time += cur_time - prev_time;
3444 if (diff_time >= timer_period) {
3446 /* Reset the timer */
3449 /* Sleep to avoid unnecessary checks */
3450 prev_time = cur_time;
3455 printf("Press enter to exit\n");
3456 rc = read(0, &c, 1);