1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc_heap.h>
31 #include <rte_memory.h>
32 #include <rte_memcpy.h>
33 #include <rte_launch.h>
35 #include <rte_alarm.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_atomic.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
50 #ifdef RTE_LIBRTE_IXGBE_PMD
51 #include <rte_pmd_ixgbe.h>
53 #ifdef RTE_LIBRTE_PDUMP
54 #include <rte_pdump.h>
57 #include <rte_metrics.h>
58 #ifdef RTE_LIBRTE_BITRATE
59 #include <rte_bitrate.h>
61 #ifdef RTE_LIBRTE_LATENCY_STATS
62 #include <rte_latencystats.h>
68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69 #define HUGE_FLAG (0x40000)
71 #define HUGE_FLAG MAP_HUGETLB
74 #ifndef MAP_HUGE_SHIFT
75 /* older kernels (or FreeBSD) will not have this define */
76 #define HUGE_SHIFT (26)
78 #define HUGE_SHIFT MAP_HUGE_SHIFT
81 #define EXTMEM_HEAP_NAME "extmem"
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* Use the master core for the command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, the NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports among the CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
102 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
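/*
 * For illustration only (assumption): these modes are typically selected
 * with the "--mp-alloc" command-line option handled in parameters.c, e.g.:
 *
 *     ./testpmd -l 0-1 -n 4 -- --mp-alloc=xmem -i
 */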
117 * Sockets on which the memory pools used by the ports are allocated
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123 * Sockets on which the RX rings used by the ports are allocated
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129 * Sockets on which the TX rings used by the ports are allocated
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet address of peer target ports to which packets are
137 * Must be instantiated with the Ethernet addresses of the peer traffic generator
140 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
182 #if defined RTE_LIBRTE_PMD_SOFTNIC
185 #ifdef RTE_LIBRTE_IEEE1588
186 &ieee1588_fwd_engine,
191 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
192 uint16_t mempool_flags;
194 struct fwd_config cur_fwd_config;
195 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
196 uint32_t retry_enabled;
197 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
198 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
200 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
201 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
202 * specified on command-line. */
203 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
206 * When running in a container, the process started with the 'stats-period'
207 * option cannot be terminated. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
212 * Configuration of packet segments used by the "txonly" processing engine.
214 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
215 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
216 TXONLY_DEF_PACKET_LEN,
218 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
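/*
 * Illustrative sketch (segment sizes are arbitrary examples): a
 * three-segment TXONLY configuration, mirroring what the interactive
 * "set txpkts 64,64,128" command is assumed to do:
 *
 *     tx_pkt_seg_lengths[0] = 64;
 *     tx_pkt_seg_lengths[1] = 64;
 *     tx_pkt_seg_lengths[2] = 128;
 *     tx_pkt_nb_segs = 3;
 *     tx_pkt_length = 64 + 64 + 128;
 */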
220 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
221 /**< Split policy for packets to TX. */
223 uint8_t txonly_multi_flow;
224 /**< Whether multiple flows are generated in TXONLY mode. */
226 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
227 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
229 /* Whether the current configuration is in DCB mode; 0 means it is not */
230 uint8_t dcb_config = 0;
232 /* Whether DCB is in testing status */
233 uint8_t dcb_test = 0;
236 * Configurable number of RX/TX queues.
238 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
239 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
242 * Configurable number of RX/TX ring descriptors.
243 * Defaults are supplied by drivers via ethdev.
245 #define RTE_TEST_RX_DESC_DEFAULT 0
246 #define RTE_TEST_TX_DESC_DEFAULT 0
247 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
248 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
250 #define RTE_PMD_PARAM_UNSET -1
252 * Configurable values of RX and TX ring threshold registers.
255 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
256 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
257 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
259 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
260 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
261 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
264 * Configurable value of RX free threshold.
266 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
269 * Configurable value of RX drop enable.
271 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
274 * Configurable value of TX free threshold.
276 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
279 * Configurable value of TX RS bit threshold.
281 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
284 * Configurable number of packets buffered before sending.
286 uint16_t noisy_tx_sw_bufsz;
289 * Configurable value of packet buffer timeout.
291 uint16_t noisy_tx_sw_buf_flush_time;
294 * Configurable value for size of VNF internal memory area
295 * used for simulating noisy neighbour behaviour
297 uint64_t noisy_lkup_mem_sz;
300 * Configurable value of number of random writes done in
301 * VNF simulation memory area.
303 uint64_t noisy_lkup_num_writes;
306 * Configurable value of number of random reads done in
307 * VNF simulation memory area.
309 uint64_t noisy_lkup_num_reads;
312 * Configurable value of number of random reads/writes done in
313 * VNF simulation memory area.
315 uint64_t noisy_lkup_num_reads_writes;
318 * Receive Side Scaling (RSS) configuration.
320 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
323 * Port topology configuration
325 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
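/*
 * Worked example: with PORT_TOPOLOGY_PAIRED and four ports, streams are
 * set up as 0<->1 and 2<->3; a "chained" topology would instead forward
 * 0->1, 1->2, 2->3 and 3->0.
 */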
328 * Avoid flushing all the RX streams before forwarding starts.
330 uint8_t no_flush_rx = 0; /* flush by default */
333 * Flow API isolated mode.
335 uint8_t flow_isolate_all;
338 * Avoid checking the link status when starting/stopping a port.
340 uint8_t no_link_check = 0; /* check by default */
343 * Enable link status change notification.
345 uint8_t lsc_interrupt = 1; /* enabled by default */
348 * Enable device removal notification.
350 uint8_t rmv_interrupt = 1; /* enabled by default */
352 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
354 /* After attach, port setup is called on event or by iterator */
355 bool setup_on_probe_event = true;
357 /* Pretty printing of ethdev events */
358 static const char * const eth_event_desc[] = {
359 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
360 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
361 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
362 [RTE_ETH_EVENT_INTR_RESET] = "reset",
363 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
364 [RTE_ETH_EVENT_IPSEC] = "IPsec",
365 [RTE_ETH_EVENT_MACSEC] = "MACsec",
366 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
367 [RTE_ETH_EVENT_NEW] = "device probed",
368 [RTE_ETH_EVENT_DESTROY] = "device released",
369 [RTE_ETH_EVENT_MAX] = NULL,
373 * Display or mask ethdev events
374 * Defaults to all events except VF_MBOX
376 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
377 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
378 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
379 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
380 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
381 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
382 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
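/*
 * Usage sketch (helper added for illustration, not part of the original
 * file): a single mask test decides whether an ethdev event is printed.
 */
static __rte_unused int
eth_event_is_printed(enum rte_eth_event_type type)
{
	/* each event type owns one bit of event_print_mask */
	return (event_print_mask & (UINT32_C(1) << type)) != 0;
}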
384 * Decide whether all memory is locked for performance.
389 * NIC bypass mode configuration options.
392 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
393 /* The NIC bypass watchdog timeout. */
394 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
398 #ifdef RTE_LIBRTE_LATENCY_STATS
401 * Set when latency stats are enabled on the command line
403 uint8_t latencystats_enabled;
406 * Lcore ID to serve latency statistics.
408 lcoreid_t latencystats_lcore_id = -1;
413 * Ethernet device configuration.
415 struct rte_eth_rxmode rx_mode = {
416 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
417 /**< Default maximum frame length. */
420 struct rte_eth_txmode tx_mode = {
421 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
424 struct rte_fdir_conf fdir_conf = {
425 .mode = RTE_FDIR_MODE_NONE,
426 .pballoc = RTE_FDIR_PBALLOC_64K,
427 .status = RTE_FDIR_REPORT_STATUS,
429 .vlan_tci_mask = 0xFFEF,
431 .src_ip = 0xFFFFFFFF,
432 .dst_ip = 0xFFFFFFFF,
435 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
436 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
438 .src_port_mask = 0xFFFF,
439 .dst_port_mask = 0xFFFF,
440 .mac_addr_byte_mask = 0xFF,
441 .tunnel_type_mask = 1,
442 .tunnel_id_mask = 0xFFFFFFFF,
447 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
449 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
450 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
452 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
453 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
455 uint16_t nb_tx_queue_stats_mappings = 0;
456 uint16_t nb_rx_queue_stats_mappings = 0;
459 * Display zero values by default for xstats
461 uint8_t xstats_hide_zero;
463 unsigned int num_sockets = 0;
464 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
466 #ifdef RTE_LIBRTE_BITRATE
467 /* Bitrate statistics */
468 struct rte_stats_bitrates *bitrate_data;
469 lcoreid_t bitrate_lcore_id;
470 uint8_t bitrate_enabled;
473 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
474 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
476 struct vxlan_encap_conf vxlan_encap_conf = {
480 .vni = "\x00\x00\x00",
482 .udp_dst = RTE_BE16(4789),
483 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
484 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
485 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
486 "\x00\x00\x00\x00\x00\x00\x00\x01",
487 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
488 "\x00\x00\x00\x00\x00\x00\x11\x11",
492 .eth_src = "\x00\x00\x00\x00\x00\x00",
493 .eth_dst = "\xff\xff\xff\xff\xff\xff",
496 struct nvgre_encap_conf nvgre_encap_conf = {
499 .tni = "\x00\x00\x00",
500 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
501 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
502 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
503 "\x00\x00\x00\x00\x00\x00\x00\x01",
504 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
505 "\x00\x00\x00\x00\x00\x00\x11\x11",
507 .eth_src = "\x00\x00\x00\x00\x00\x00",
508 .eth_dst = "\xff\xff\xff\xff\xff\xff",
511 /* Forward function declarations */
512 static void setup_attached_port(portid_t pi);
513 static void map_port_queue_stats_mapping_registers(portid_t pi,
514 struct rte_port *port);
515 static void check_all_ports_link_status(uint32_t port_mask);
516 static int eth_event_callback(portid_t port_id,
517 enum rte_eth_event_type type,
518 void *param, void *ret_param);
519 static void dev_event_callback(const char *device_name,
520 enum rte_dev_event_type type,
524 * Check if all the ports are started.
525 * If yes, return positive value. If not, return zero.
527 static int all_ports_started(void);
529 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
530 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
533 * Helper function to check whether a socket has already been discovered.
534 * Return a positive value if the socket is new, zero if already known.
537 new_socket_id(unsigned int socket_id)
541 for (i = 0; i < num_sockets; i++) {
542 if (socket_ids[i] == socket_id)
549 * Set up the default configuration.
552 set_default_fwd_lcores_config(void)
556 unsigned int sock_num;
559 for (i = 0; i < RTE_MAX_LCORE; i++) {
560 if (!rte_lcore_is_enabled(i))
562 sock_num = rte_lcore_to_socket_id(i);
563 if (new_socket_id(sock_num)) {
564 if (num_sockets >= RTE_MAX_NUMA_NODES) {
565 rte_exit(EXIT_FAILURE,
566 "Total sockets greater than %u\n",
569 socket_ids[num_sockets++] = sock_num;
571 if (i == rte_get_master_lcore())
573 fwd_lcores_cpuids[nb_lc++] = i;
575 nb_lcores = (lcoreid_t) nb_lc;
576 nb_cfg_lcores = nb_lcores;
581 set_def_peer_eth_addrs(void)
585 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
586 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
587 peer_eth_addrs[i].addr_bytes[5] = i;
592 set_default_fwd_ports_config(void)
597 RTE_ETH_FOREACH_DEV(pt_id) {
598 fwd_ports_ids[i++] = pt_id;
600 /* Update sockets info according to the attached device */
601 int socket_id = rte_eth_dev_socket_id(pt_id);
602 if (socket_id >= 0 && new_socket_id(socket_id)) {
603 if (num_sockets >= RTE_MAX_NUMA_NODES) {
604 rte_exit(EXIT_FAILURE,
605 "Total sockets greater than %u\n",
608 socket_ids[num_sockets++] = socket_id;
612 nb_cfg_ports = nb_ports;
613 nb_fwd_ports = nb_ports;
617 set_def_fwd_config(void)
619 set_default_fwd_lcores_config();
620 set_def_peer_eth_addrs();
621 set_default_fwd_ports_config();
624 /* extremely pessimistic estimation of memory required to create a mempool */
626 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
628 unsigned int n_pages, mbuf_per_pg, leftover;
629 uint64_t total_mem, mbuf_mem, obj_sz;
631 /* there is no good way to predict how much space the mempool will
632 * occupy because it will allocate chunks on the fly, and some of those
633 * will come from default DPDK memory while some will come from our
634 * external memory, so just assume 128MB will be enough for everyone.
636 uint64_t hdr_mem = 128 << 20;
638 /* account for possible non-contiguousness */
639 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
641 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
645 mbuf_per_pg = pgsz / obj_sz;
646 leftover = (nb_mbufs % mbuf_per_pg) > 0;
647 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
649 mbuf_mem = n_pages * pgsz;
651 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
653 if (total_mem > SIZE_MAX) {
654 TESTPMD_LOG(ERR, "Memory size too big\n");
657 *out = (size_t)total_mem;
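/*
 * Worked example (illustrative numbers): with nb_mbufs = 262144, an
 * object size rounded up to 2560 bytes and pgsz = 2 MiB, one page holds
 * 2097152 / 2560 = 819 mbufs, so n_pages = 262144 / 819 + 1 = 321 and
 * mbuf_mem = 642 MiB; adding the pessimistic 128 MiB of mempool header
 * room gives 770 MiB, rounded up to the page size.
 */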
663 pagesz_flags(uint64_t page_sz)
665 /* as per the mmap() manpage, huge page sizes are encoded as log2 of the
666 * page size, shifted by MAP_HUGE_SHIFT
668 int log2 = rte_log2_u64(page_sz);
670 return (log2 << HUGE_SHIFT);
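/*
 * Example: for 2 MiB pages rte_log2_u64() yields 21, so this returns
 * 21 << 26, matching the kernel's MAP_HUGE_2MB; for 1 GiB pages it is
 * 30 << 26 (MAP_HUGE_1GB).
 */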
674 alloc_mem(size_t memsz, size_t pgsz, bool huge)
679 /* allocate anonymous hugepages */
680 flags = MAP_ANONYMOUS | MAP_PRIVATE;
682 flags |= HUGE_FLAG | pagesz_flags(pgsz);
684 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
685 if (addr == MAP_FAILED)
691 struct extmem_param {
695 rte_iova_t *iova_table;
696 unsigned int iova_table_len;
700 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
703 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
704 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
705 unsigned int cur_page, n_pages, pgsz_idx;
706 size_t mem_sz, cur_pgsz;
707 rte_iova_t *iovas = NULL;
711 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
712 /* skip anything that is too big */
713 if (pgsizes[pgsz_idx] > SIZE_MAX)
716 cur_pgsz = pgsizes[pgsz_idx];
718 /* if we were told not to allocate hugepages, override */
720 cur_pgsz = sysconf(_SC_PAGESIZE);
722 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
724 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
728 /* allocate our memory */
729 addr = alloc_mem(mem_sz, cur_pgsz, huge);
731 /* if we couldn't allocate memory with a specified page size,
732 * that doesn't mean we can't do it with other page sizes, so
738 /* store IOVA addresses for every page in this memory area */
739 n_pages = mem_sz / cur_pgsz;
741 iovas = malloc(sizeof(*iovas) * n_pages);
744 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
747 /* lock memory if it's not huge pages */
751 /* populate IOVA addresses */
752 for (cur_page = 0; cur_page < n_pages; cur_page++) {
757 offset = cur_pgsz * cur_page;
758 cur = RTE_PTR_ADD(addr, offset);
760 /* touch the page before getting its IOVA */
761 *(volatile char *)cur = 0;
763 iova = rte_mem_virt2iova(cur);
765 iovas[cur_page] = iova;
770 /* if we couldn't allocate anything */
776 param->pgsz = cur_pgsz;
777 param->iova_table = iovas;
778 param->iova_table_len = n_pages;
785 munmap(addr, mem_sz);
791 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
793 struct extmem_param param;
796 memset(&param, 0, sizeof(param));
798 /* check if our heap exists */
799 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
801 /* create our heap */
802 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
804 TESTPMD_LOG(ERR, "Cannot create heap\n");
809 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
811 TESTPMD_LOG(ERR, "Cannot create memory area\n");
815 /* we now have a valid memory area, so add it to heap */
816 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
817 param.addr, param.len, param.iova_table,
818 param.iova_table_len, param.pgsz);
820 /* when using VFIO, memory is automatically mapped for DMA by EAL */
822 /* not needed any more */
823 free(param.iova_table);
826 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
827 munmap(param.addr, param.len);
833 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
839 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
840 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
845 RTE_ETH_FOREACH_DEV(pid) {
846 struct rte_eth_dev *dev =
847 &rte_eth_devices[pid];
849 ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
853 "unable to DMA unmap addr 0x%p "
855 memhdr->addr, dev->data->name);
858 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
861 "unable to un-register addr 0x%p\n", memhdr->addr);
866 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
867 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
870 size_t page_size = sysconf(_SC_PAGESIZE);
873 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
877 "unable to register addr 0x%p\n", memhdr->addr);
880 RTE_ETH_FOREACH_DEV(pid) {
881 struct rte_eth_dev *dev =
882 &rte_eth_devices[pid];
884 ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
888 "unable to DMA map addr 0x%p "
890 memhdr->addr, dev->data->name);
896 * Configuration initialisation done once at init time.
898 static struct rte_mempool *
899 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
900 unsigned int socket_id)
902 char pool_name[RTE_MEMPOOL_NAMESIZE];
903 struct rte_mempool *rte_mp = NULL;
906 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
907 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
910 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
911 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
913 switch (mp_alloc_type) {
914 case MP_ALLOC_NATIVE:
916 /* wrapper to rte_mempool_create() */
917 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
918 rte_mbuf_best_mempool_ops());
919 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
920 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
925 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
926 mb_size, (unsigned int) mb_mempool_cache,
927 sizeof(struct rte_pktmbuf_pool_private),
928 socket_id, mempool_flags);
932 if (rte_mempool_populate_anon(rte_mp) == 0) {
933 rte_mempool_free(rte_mp);
937 rte_pktmbuf_pool_init(rte_mp, NULL);
938 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
939 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
943 case MP_ALLOC_XMEM_HUGE:
946 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
948 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
949 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
952 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
954 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
956 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
957 rte_mbuf_best_mempool_ops());
958 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
959 mb_mempool_cache, 0, mbuf_seg_size,
965 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
970 if (rte_mp == NULL) {
971 rte_exit(EXIT_FAILURE,
972 "Creation of mbuf pool for socket %u failed: %s\n",
973 socket_id, rte_strerror(rte_errno));
974 } else if (verbose_level > 0) {
975 rte_mempool_dump(stdout, rte_mp);
981 * Check whether the given socket ID is valid in NUMA mode;
982 * return 0 if valid, -1 otherwise
985 check_socket_id(const unsigned int socket_id)
987 static int warning_once = 0;
989 if (new_socket_id(socket_id)) {
990 if (!warning_once && numa_support)
991 printf("Warning: NUMA should be configured manually by"
992 " using --port-numa-config and"
993 " --ring-numa-config parameters along with"
1002 * Get the allowed maximum number of RX queues.
1003 * *pid returns the port ID that has the minimal value of
1004 * max_rx_queues among all ports.
1007 get_allowed_max_nb_rxq(portid_t *pid)
1009 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
1011 struct rte_eth_dev_info dev_info;
1013 RTE_ETH_FOREACH_DEV(pi) {
1014 rte_eth_dev_info_get(pi, &dev_info);
1015 if (dev_info.max_rx_queues < allowed_max_rxq) {
1016 allowed_max_rxq = dev_info.max_rx_queues;
1020 return allowed_max_rxq;
1024 * Check whether the input rxq is valid.
1025 * It is valid if it does not exceed the maximum number
1026 * of RX queues of any port.
1027 * Return 0 if valid, -1 otherwise.
1030 check_nb_rxq(queueid_t rxq)
1032 queueid_t allowed_max_rxq;
1035 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1036 if (rxq > allowed_max_rxq) {
1037 printf("Fail: input rxq (%u) can't be greater "
1038 "than max_rx_queues (%u) of port %u\n",
1048 * Get the allowed maximum number of TX queues.
1049 * *pid returns the port ID that has the minimal value of
1050 * max_tx_queues among all ports.
1053 get_allowed_max_nb_txq(portid_t *pid)
1055 queueid_t allowed_max_txq = MAX_QUEUE_ID;
1057 struct rte_eth_dev_info dev_info;
1059 RTE_ETH_FOREACH_DEV(pi) {
1060 rte_eth_dev_info_get(pi, &dev_info);
1061 if (dev_info.max_tx_queues < allowed_max_txq) {
1062 allowed_max_txq = dev_info.max_tx_queues;
1066 return allowed_max_txq;
1070 * Check whether the input txq is valid.
1071 * It is valid if it does not exceed the maximum number
1072 * of TX queues of any port.
1073 * Return 0 if valid, -1 otherwise.
1076 check_nb_txq(queueid_t txq)
1078 queueid_t allowed_max_txq;
1081 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1082 if (txq > allowed_max_txq) {
1083 printf("Fail: input txq (%u) can't be greater "
1084 "than max_tx_queues (%u) of port %u\n",
1097 struct rte_port *port;
1098 struct rte_mempool *mbp;
1099 unsigned int nb_mbuf_per_pool;
1101 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1102 struct rte_gro_param gro_param;
1108 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1110 /* Configuration of logical cores. */
1111 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1112 sizeof(struct fwd_lcore *) * nb_lcores,
1113 RTE_CACHE_LINE_SIZE);
1114 if (fwd_lcores == NULL) {
1115 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1116 "failed\n", nb_lcores);
1118 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1119 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1120 sizeof(struct fwd_lcore),
1121 RTE_CACHE_LINE_SIZE);
1122 if (fwd_lcores[lc_id] == NULL) {
1123 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1126 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1129 RTE_ETH_FOREACH_DEV(pid) {
1131 /* Apply default TxRx configuration for all ports */
1132 port->dev_conf.txmode = tx_mode;
1133 port->dev_conf.rxmode = rx_mode;
1134 rte_eth_dev_info_get(pid, &port->dev_info);
1136 if (!(port->dev_info.tx_offload_capa &
1137 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1138 port->dev_conf.txmode.offloads &=
1139 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1140 if (!(port->dev_info.tx_offload_capa &
1141 DEV_TX_OFFLOAD_MATCH_METADATA))
1142 port->dev_conf.txmode.offloads &=
1143 ~DEV_TX_OFFLOAD_MATCH_METADATA;
1145 if (port_numa[pid] != NUMA_NO_CONFIG)
1146 port_per_socket[port_numa[pid]]++;
1148 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1151 * if socket_id is invalid,
1152 * set to the first available socket.
1154 if (check_socket_id(socket_id) < 0)
1155 socket_id = socket_ids[0];
1156 port_per_socket[socket_id]++;
1160 /* Apply Rx offloads configuration */
1161 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1162 port->rx_conf[k].offloads =
1163 port->dev_conf.rxmode.offloads;
1164 /* Apply Tx offloads configuration */
1165 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1166 port->tx_conf[k].offloads =
1167 port->dev_conf.txmode.offloads;
1169 /* set flag to initialize port/queue */
1170 port->need_reconfig = 1;
1171 port->need_reconfig_queues = 1;
1172 port->tx_metadata = 0;
1174 /* Check the maximum number of segments per MTU and update
1175 * the mbuf data size accordingly.
1177 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX) {
1178 data_size = rx_mode.max_rx_pkt_len /
1179 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1181 if ((data_size + RTE_PKTMBUF_HEADROOM) >
1183 mbuf_data_size = data_size +
1184 RTE_PKTMBUF_HEADROOM;
1191 TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
1195 * Create mbuf pools.
1196 * If NUMA support is disabled, create a single mbuf pool in
1197 * socket 0 memory by default.
1198 * Otherwise, create an mbuf pool in the memory of each socket in use.
1200 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1201 * nb_txd can be configured at run time.
1203 if (param_total_num_mbufs)
1204 nb_mbuf_per_pool = param_total_num_mbufs;
1206 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1207 (nb_lcores * mb_mempool_cache) +
1208 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1209 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
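/*
 * Worked example (values depend on build-time constants): with
 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048, 4 lcores,
 * mb_mempool_cache = 250 and MAX_PKT_BURST = 512, this yields
 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, then scaled by
 * RTE_MAX_ETHPORTS.
 */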
1215 for (i = 0; i < num_sockets; i++)
1216 mempools[i] = mbuf_pool_create(mbuf_data_size,
1220 if (socket_num == UMA_NO_CONFIG)
1221 mempools[0] = mbuf_pool_create(mbuf_data_size,
1222 nb_mbuf_per_pool, 0);
1224 mempools[socket_num] = mbuf_pool_create
1232 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1233 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1235 * Record which mbuf pool each logical core should use, if needed.
1237 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1238 mbp = mbuf_pool_find(
1239 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1242 mbp = mbuf_pool_find(0);
1243 fwd_lcores[lc_id]->mbp = mbp;
1244 /* initialize GSO context */
1245 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1246 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1247 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1248 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1250 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1253 /* Configuration of packet forwarding streams. */
1254 if (init_fwd_streams() < 0)
1255 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1259 /* create a gro context for each lcore */
1260 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1261 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1262 gro_param.max_item_per_flow = MAX_PKT_BURST;
1263 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1264 gro_param.socket_id = rte_lcore_to_socket_id(
1265 fwd_lcores_cpuids[lc_id]);
1266 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1267 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1268 rte_exit(EXIT_FAILURE,
1269 "rte_gro_ctx_create() failed\n");
1273 #if defined RTE_LIBRTE_PMD_SOFTNIC
1274 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1275 RTE_ETH_FOREACH_DEV(pid) {
1277 const char *driver = port->dev_info.driver_name;
1279 if (strcmp(driver, "net_softnic") == 0)
1280 port->softport.fwd_lcore_arg = fwd_lcores;
1289 reconfig(portid_t new_port_id, unsigned socket_id)
1291 struct rte_port *port;
1293 /* Reconfiguration of Ethernet ports. */
1294 port = &ports[new_port_id];
1295 rte_eth_dev_info_get(new_port_id, &port->dev_info);
1297 /* set flag to initialize port/queue */
1298 port->need_reconfig = 1;
1299 port->need_reconfig_queues = 1;
1300 port->socket_id = socket_id;
1307 init_fwd_streams(void)
1310 struct rte_port *port;
1311 streamid_t sm_id, nb_fwd_streams_new;
1314 /* set socket id according to numa or not */
1315 RTE_ETH_FOREACH_DEV(pid) {
1317 if (nb_rxq > port->dev_info.max_rx_queues) {
1318 printf("Fail: nb_rxq(%d) is greater than "
1319 "max_rx_queues(%d)\n", nb_rxq,
1320 port->dev_info.max_rx_queues);
1323 if (nb_txq > port->dev_info.max_tx_queues) {
1324 printf("Fail: nb_txq(%d) is greater than "
1325 "max_tx_queues(%d)\n", nb_txq,
1326 port->dev_info.max_tx_queues);
1330 if (port_numa[pid] != NUMA_NO_CONFIG)
1331 port->socket_id = port_numa[pid];
1333 port->socket_id = rte_eth_dev_socket_id(pid);
1336 * if socket_id is invalid,
1337 * set to the first available socket.
1339 if (check_socket_id(port->socket_id) < 0)
1340 port->socket_id = socket_ids[0];
1344 if (socket_num == UMA_NO_CONFIG)
1345 port->socket_id = 0;
1347 port->socket_id = socket_num;
1351 q = RTE_MAX(nb_rxq, nb_txq);
1353 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1356 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1357 if (nb_fwd_streams_new == nb_fwd_streams)
1360 if (fwd_streams != NULL) {
1361 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1362 if (fwd_streams[sm_id] == NULL)
1364 rte_free(fwd_streams[sm_id]);
1365 fwd_streams[sm_id] = NULL;
1367 rte_free(fwd_streams);
1372 nb_fwd_streams = nb_fwd_streams_new;
1373 if (nb_fwd_streams) {
1374 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1375 sizeof(struct fwd_stream *) * nb_fwd_streams,
1376 RTE_CACHE_LINE_SIZE);
1377 if (fwd_streams == NULL)
1378 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1379 " (struct fwd_stream *)) failed\n",
1382 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1383 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1384 " struct fwd_stream", sizeof(struct fwd_stream),
1385 RTE_CACHE_LINE_SIZE);
1386 if (fwd_streams[sm_id] == NULL)
1387 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1388 "(struct fwd_stream) failed\n");
1395 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1397 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1399 unsigned int total_burst;
1400 unsigned int nb_burst;
1401 unsigned int burst_stats[3];
1402 uint16_t pktnb_stats[3];
1404 int burst_percent[3];
1407 * First compute the total number of packet bursts and the
1408 * two highest numbers of bursts of the same number of packets.
1411 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1412 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1413 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1414 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1417 total_burst += nb_burst;
1418 if (nb_burst > burst_stats[0]) {
1419 burst_stats[1] = burst_stats[0];
1420 pktnb_stats[1] = pktnb_stats[0];
1421 burst_stats[0] = nb_burst;
1422 pktnb_stats[0] = nb_pkt;
1423 } else if (nb_burst > burst_stats[1]) {
1424 burst_stats[1] = nb_burst;
1425 pktnb_stats[1] = nb_pkt;
1428 if (total_burst == 0)
1430 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1431 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1432 burst_percent[0], (int) pktnb_stats[0]);
1433 if (burst_stats[0] == total_burst) {
1437 if (burst_stats[0] + burst_stats[1] == total_burst) {
1438 printf(" + %d%% of %d pkts]\n",
1439 100 - burst_percent[0], pktnb_stats[1]);
1442 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1443 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1444 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1445 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1448 printf(" + %d%% of %d pkts + %d%% of others]\n",
1449 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
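/*
 * Worked example: for a spread of 900 bursts of 32 packets, 80 bursts of
 * 16 packets and 20 single-packet bursts, total_burst is 1000 and the
 * output is "[90% of 32 pkts + 8% of 16 pkts + 2% of others]".
 */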
1451 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1454 fwd_stream_stats_display(streamid_t stream_id)
1456 struct fwd_stream *fs;
1457 static const char *fwd_top_stats_border = "-------";
1459 fs = fwd_streams[stream_id];
1460 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1461 (fs->fwd_dropped == 0))
1463 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1464 "TX Port=%2d/Queue=%2d %s\n",
1465 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1466 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1467 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1468 " TX-dropped: %-14"PRIu64,
1469 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1471 /* if checksum mode */
1472 if (cur_fwd_eng == &csum_fwd_engine) {
1473 printf(" RX- bad IP checksum: %-14"PRIu64
1474 " Rx- bad L4 checksum: %-14"PRIu64
1475 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1476 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1477 fs->rx_bad_outer_l4_csum);
1482 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1483 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1484 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1489 fwd_stats_display(void)
1491 static const char *fwd_stats_border = "----------------------";
1492 static const char *acc_stats_border = "+++++++++++++++";
1494 struct fwd_stream *rx_stream;
1495 struct fwd_stream *tx_stream;
1496 uint64_t tx_dropped;
1497 uint64_t rx_bad_ip_csum;
1498 uint64_t rx_bad_l4_csum;
1499 uint64_t rx_bad_outer_l4_csum;
1500 } ports_stats[RTE_MAX_ETHPORTS];
1501 uint64_t total_rx_dropped = 0;
1502 uint64_t total_tx_dropped = 0;
1503 uint64_t total_rx_nombuf = 0;
1504 struct rte_eth_stats stats;
1505 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1506 uint64_t fwd_cycles = 0;
1508 uint64_t total_recv = 0;
1509 uint64_t total_xmit = 0;
1510 struct rte_port *port;
1515 memset(ports_stats, 0, sizeof(ports_stats));
1517 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1518 struct fwd_stream *fs = fwd_streams[sm_id];
1520 if (cur_fwd_config.nb_fwd_streams >
1521 cur_fwd_config.nb_fwd_ports) {
1522 fwd_stream_stats_display(sm_id);
1524 ports_stats[fs->tx_port].tx_stream = fs;
1525 ports_stats[fs->rx_port].rx_stream = fs;
1528 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1530 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1531 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1532 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1533 fs->rx_bad_outer_l4_csum;
1535 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1536 fwd_cycles += fs->core_cycles;
1539 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1542 pt_id = fwd_ports_ids[i];
1543 port = &ports[pt_id];
1545 rte_eth_stats_get(pt_id, &stats);
1546 stats.ipackets -= port->stats.ipackets;
1547 stats.opackets -= port->stats.opackets;
1548 stats.ibytes -= port->stats.ibytes;
1549 stats.obytes -= port->stats.obytes;
1550 stats.imissed -= port->stats.imissed;
1551 stats.oerrors -= port->stats.oerrors;
1552 stats.rx_nombuf -= port->stats.rx_nombuf;
1554 total_recv += stats.ipackets;
1555 total_xmit += stats.opackets;
1556 total_rx_dropped += stats.imissed;
1557 total_tx_dropped += ports_stats[pt_id].tx_dropped;
1558 total_tx_dropped += stats.oerrors;
1559 total_rx_nombuf += stats.rx_nombuf;
1561 printf("\n %s Forward statistics for port %-2d %s\n",
1562 fwd_stats_border, pt_id, fwd_stats_border);
1564 if (!port->rx_queue_stats_mapping_enabled &&
1565 !port->tx_queue_stats_mapping_enabled) {
1566 printf(" RX-packets: %-14"PRIu64
1567 " RX-dropped: %-14"PRIu64
1568 "RX-total: %-"PRIu64"\n",
1569 stats.ipackets, stats.imissed,
1570 stats.ipackets + stats.imissed);
1572 if (cur_fwd_eng == &csum_fwd_engine)
1573 printf(" Bad-ipcsum: %-14"PRIu64
1574 " Bad-l4csum: %-14"PRIu64
1575 "Bad-outer-l4csum: %-14"PRIu64"\n",
1576 ports_stats[pt_id].rx_bad_ip_csum,
1577 ports_stats[pt_id].rx_bad_l4_csum,
1578 ports_stats[pt_id].rx_bad_outer_l4_csum);
1579 if (stats.ierrors + stats.rx_nombuf > 0) {
1580 printf(" RX-error: %-"PRIu64"\n",
1582 printf(" RX-nombufs: %-14"PRIu64"\n",
1586 printf(" TX-packets: %-14"PRIu64
1587 " TX-dropped: %-14"PRIu64
1588 "TX-total: %-"PRIu64"\n",
1589 stats.opackets, ports_stats[pt_id].tx_dropped,
1590 stats.opackets + ports_stats[pt_id].tx_dropped);
1592 printf(" RX-packets: %14"PRIu64
1593 " RX-dropped:%14"PRIu64
1594 " RX-total:%14"PRIu64"\n",
1595 stats.ipackets, stats.imissed,
1596 stats.ipackets + stats.imissed);
1598 if (cur_fwd_eng == &csum_fwd_engine)
1599 printf(" Bad-ipcsum:%14"PRIu64
1600 " Bad-l4csum:%14"PRIu64
1601 " Bad-outer-l4csum: %-14"PRIu64"\n",
1602 ports_stats[pt_id].rx_bad_ip_csum,
1603 ports_stats[pt_id].rx_bad_l4_csum,
1604 ports_stats[pt_id].rx_bad_outer_l4_csum);
1605 if ((stats.ierrors + stats.rx_nombuf) > 0) {
1606 printf(" RX-error:%"PRIu64"\n", stats.ierrors);
1607 printf(" RX-nombufs: %14"PRIu64"\n",
1611 printf(" TX-packets: %14"PRIu64
1612 " TX-dropped:%14"PRIu64
1613 " TX-total:%14"PRIu64"\n",
1614 stats.opackets, ports_stats[pt_id].tx_dropped,
1615 stats.opackets + ports_stats[pt_id].tx_dropped);
1618 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1619 if (ports_stats[pt_id].rx_stream)
1620 pkt_burst_stats_display("RX",
1621 &ports_stats[pt_id].rx_stream->rx_burst_stats);
1622 if (ports_stats[pt_id].tx_stream)
1623 pkt_burst_stats_display("TX",
1624 &ports_stats[pt_id].tx_stream->tx_burst_stats);
1627 if (port->rx_queue_stats_mapping_enabled) {
1629 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1630 printf(" Stats reg %2d RX-packets:%14"PRIu64
1631 " RX-errors:%14"PRIu64
1632 " RX-bytes:%14"PRIu64"\n",
1633 j, stats.q_ipackets[j],
1634 stats.q_errors[j], stats.q_ibytes[j]);
1638 if (port->tx_queue_stats_mapping_enabled) {
1639 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1640 printf(" Stats reg %2d TX-packets:%14"PRIu64
1643 j, stats.q_opackets[j],
1648 printf(" %s--------------------------------%s\n",
1649 fwd_stats_border, fwd_stats_border);
1652 printf("\n %s Accumulated forward statistics for all ports"
1654 acc_stats_border, acc_stats_border);
1655 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1657 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1659 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1660 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1661 if (total_rx_nombuf > 0)
1662 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1663 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1665 acc_stats_border, acc_stats_border);
1666 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1668 printf("\n CPU cycles/packet=%u (total cycles="
1669 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1670 (unsigned int)(fwd_cycles / total_recv),
1671 fwd_cycles, total_recv);
1676 fwd_stats_reset(void)
1682 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1683 pt_id = fwd_ports_ids[i];
1684 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
1686 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1687 struct fwd_stream *fs = fwd_streams[sm_id];
1691 fs->fwd_dropped = 0;
1692 fs->rx_bad_ip_csum = 0;
1693 fs->rx_bad_l4_csum = 0;
1694 fs->rx_bad_outer_l4_csum = 0;
1696 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1697 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
1698 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
1700 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1701 fs->core_cycles = 0;
1707 flush_fwd_rx_queues(void)
1709 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1716 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1717 uint64_t timer_period;
1719 /* convert to number of cycles */
1720 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1722 for (j = 0; j < 2; j++) {
1723 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1724 for (rxq = 0; rxq < nb_rxq; rxq++) {
1725 port_id = fwd_ports_ids[rxp];
1727 * testpmd can get stuck in the do-while loop below
1728 * if rte_eth_rx_burst() keeps returning
1729 * packets, so a timer is added to exit the loop
1730 * after a 1-second expiry.
1732 prev_tsc = rte_rdtsc();
1734 nb_rx = rte_eth_rx_burst(port_id, rxq,
1735 pkts_burst, MAX_PKT_BURST);
1736 for (i = 0; i < nb_rx; i++)
1737 rte_pktmbuf_free(pkts_burst[i]);
1739 cur_tsc = rte_rdtsc();
1740 diff_tsc = cur_tsc - prev_tsc;
1741 timer_tsc += diff_tsc;
1742 } while ((nb_rx > 0) &&
1743 (timer_tsc < timer_period));
1747 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1752 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1754 struct fwd_stream **fsm;
1757 #ifdef RTE_LIBRTE_BITRATE
1758 uint64_t tics_per_1sec;
1759 uint64_t tics_datum;
1760 uint64_t tics_current;
1761 uint16_t i, cnt_ports;
1763 cnt_ports = nb_ports;
1764 tics_datum = rte_rdtsc();
1765 tics_per_1sec = rte_get_timer_hz();
1767 fsm = &fwd_streams[fc->stream_idx];
1768 nb_fs = fc->stream_nb;
1770 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1771 (*pkt_fwd)(fsm[sm_id]);
1772 #ifdef RTE_LIBRTE_BITRATE
1773 if (bitrate_enabled != 0 &&
1774 bitrate_lcore_id == rte_lcore_id()) {
1775 tics_current = rte_rdtsc();
1776 if (tics_current - tics_datum >= tics_per_1sec) {
1777 /* Periodic bitrate calculation */
1778 for (i = 0; i < cnt_ports; i++)
1779 rte_stats_bitrate_calc(bitrate_data,
1781 tics_datum = tics_current;
1785 #ifdef RTE_LIBRTE_LATENCY_STATS
1786 if (latencystats_enabled != 0 &&
1787 latencystats_lcore_id == rte_lcore_id())
1788 rte_latencystats_update();
1791 } while (! fc->stopped);
1795 start_pkt_forward_on_core(void *fwd_arg)
1797 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1798 cur_fwd_config.fwd_eng->packet_fwd);
1803 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1804 * Used to start communication flows in network loopback test configurations.
1807 run_one_txonly_burst_on_core(void *fwd_arg)
1809 struct fwd_lcore *fwd_lc;
1810 struct fwd_lcore tmp_lcore;
1812 fwd_lc = (struct fwd_lcore *) fwd_arg;
1813 tmp_lcore = *fwd_lc;
1814 tmp_lcore.stopped = 1;
1815 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1820 * Launch packet forwarding:
1821 * - Setup per-port forwarding context.
1822 * - launch logical cores with their forwarding configuration.
1825 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1827 port_fwd_begin_t port_fwd_begin;
1832 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1833 if (port_fwd_begin != NULL) {
1834 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1835 (*port_fwd_begin)(fwd_ports_ids[i]);
1837 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1838 lc_id = fwd_lcores_cpuids[i];
1839 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1840 fwd_lcores[i]->stopped = 0;
1841 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1842 fwd_lcores[i], lc_id);
1844 printf("launch lcore %u failed - diag=%d\n",
1851 * Launch packet forwarding configuration.
1854 start_packet_forwarding(int with_tx_first)
1856 port_fwd_begin_t port_fwd_begin;
1857 port_fwd_end_t port_fwd_end;
1858 struct rte_port *port;
1862 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1863 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1865 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1866 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1868 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1869 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1870 (!nb_rxq || !nb_txq))
1871 rte_exit(EXIT_FAILURE,
1872 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1873 cur_fwd_eng->fwd_mode_name);
1875 if (all_ports_started() == 0) {
1876 printf("Not all ports were started\n");
1879 if (test_done == 0) {
1880 printf("Packet forwarding already started\n");
1886 for (i = 0; i < nb_fwd_ports; i++) {
1887 pt_id = fwd_ports_ids[i];
1888 port = &ports[pt_id];
1889 if (!port->dcb_flag) {
1890 printf("In DCB mode, all forwarding ports must "
1891 "be configured in this mode.\n");
1895 if (nb_fwd_lcores == 1) {
1896 printf("In DCB mode,the nb forwarding cores "
1897 "should be larger than 1.\n");
1906 flush_fwd_rx_queues();
1908 pkt_fwd_config_display(&cur_fwd_config);
1909 rxtx_config_display();
1912 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1913 pt_id = fwd_ports_ids[i];
1914 port = &ports[pt_id];
1915 map_port_queue_stats_mapping_registers(pt_id, port);
1917 if (with_tx_first) {
1918 port_fwd_begin = tx_only_engine.port_fwd_begin;
1919 if (port_fwd_begin != NULL) {
1920 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1921 (*port_fwd_begin)(fwd_ports_ids[i]);
1923 while (with_tx_first--) {
1924 launch_packet_forwarding(
1925 run_one_txonly_burst_on_core);
1926 rte_eal_mp_wait_lcore();
1928 port_fwd_end = tx_only_engine.port_fwd_end;
1929 if (port_fwd_end != NULL) {
1930 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1931 (*port_fwd_end)(fwd_ports_ids[i]);
1934 launch_packet_forwarding(start_pkt_forward_on_core);
1938 stop_packet_forwarding(void)
1940 port_fwd_end_t port_fwd_end;
1946 printf("Packet forwarding not started\n");
1949 printf("Telling cores to stop...");
1950 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1951 fwd_lcores[lc_id]->stopped = 1;
1952 printf("\nWaiting for lcores to finish...\n");
1953 rte_eal_mp_wait_lcore();
1954 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1955 if (port_fwd_end != NULL) {
1956 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1957 pt_id = fwd_ports_ids[i];
1958 (*port_fwd_end)(pt_id);
1962 fwd_stats_display();
1964 printf("\nDone.\n");
1969 dev_set_link_up(portid_t pid)
1971 if (rte_eth_dev_set_link_up(pid) < 0)
1972 printf("\nSet link up fail.\n");
1976 dev_set_link_down(portid_t pid)
1978 if (rte_eth_dev_set_link_down(pid) < 0)
1979 printf("\nSet link down fail.\n");
1983 all_ports_started(void)
1986 struct rte_port *port;
1988 RTE_ETH_FOREACH_DEV(pi) {
1990 /* Check if there is a port which is not started */
1991 if ((port->port_status != RTE_PORT_STARTED) &&
1992 (port->slave_flag == 0))
1996 /* All ports are started */
2001 port_is_stopped(portid_t port_id)
2003 struct rte_port *port = &ports[port_id];
2005 if ((port->port_status != RTE_PORT_STOPPED) &&
2006 (port->slave_flag == 0))
2012 all_ports_stopped(void)
2016 RTE_ETH_FOREACH_DEV(pi) {
2017 if (!port_is_stopped(pi))
2025 port_is_started(portid_t port_id)
2027 if (port_id_is_invalid(port_id, ENABLED_WARN))
2030 if (ports[port_id].port_status != RTE_PORT_STARTED)
2037 start_port(portid_t pid)
2039 int diag, need_check_link_status = -1;
2042 struct rte_port *port;
2043 struct rte_ether_addr mac_addr;
2045 if (port_id_is_invalid(pid, ENABLED_WARN))
2050 RTE_ETH_FOREACH_DEV(pi) {
2051 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2054 need_check_link_status = 0;
2056 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2057 RTE_PORT_HANDLING) == 0) {
2058 printf("Port %d is now not stopped\n", pi);
2062 if (port->need_reconfig > 0) {
2063 port->need_reconfig = 0;
2065 if (flow_isolate_all) {
2066 int ret = port_flow_isolate(pi, 1);
2068 printf("Failed to apply isolated"
2069 " mode on port %d\n", pi);
2073 configure_rxtx_dump_callbacks(0);
2074 printf("Configuring Port %d (socket %u)\n", pi,
2076 /* configure port */
2077 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
2080 if (rte_atomic16_cmpset(&(port->port_status),
2081 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2082 printf("Port %d can not be set back "
2083 "to stopped\n", pi);
2084 printf("Fail to configure port %d\n", pi);
2085 /* try to reconfigure port next time */
2086 port->need_reconfig = 1;
2090 if (port->need_reconfig_queues > 0) {
2091 port->need_reconfig_queues = 0;
2092 /* setup tx queues */
2093 for (qi = 0; qi < nb_txq; qi++) {
2094 if ((numa_support) &&
2095 (txring_numa[pi] != NUMA_NO_CONFIG))
2096 diag = rte_eth_tx_queue_setup(pi, qi,
2097 port->nb_tx_desc[qi],
2099 &(port->tx_conf[qi]));
2101 diag = rte_eth_tx_queue_setup(pi, qi,
2102 port->nb_tx_desc[qi],
2104 &(port->tx_conf[qi]));
2109 /* Failed to set up TX queue; return */
2110 if (rte_atomic16_cmpset(&(port->port_status),
2112 RTE_PORT_STOPPED) == 0)
2113 printf("Port %d can not be set back "
2114 "to stopped\n", pi);
2115 printf("Fail to configure port %d tx queues\n",
2117 /* try to reconfigure queues next time */
2118 port->need_reconfig_queues = 1;
2121 for (qi = 0; qi < nb_rxq; qi++) {
2122 /* setup rx queues */
2123 if ((numa_support) &&
2124 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2125 struct rte_mempool * mp =
2126 mbuf_pool_find(rxring_numa[pi]);
2128 printf("Failed to setup RX queue:"
2129 "No mempool allocation"
2130 " on the socket %d\n",
2135 diag = rte_eth_rx_queue_setup(pi, qi,
2136 port->nb_rx_desc[qi],
2138 &(port->rx_conf[qi]),
2141 struct rte_mempool *mp =
2142 mbuf_pool_find(port->socket_id);
2144 printf("Failed to setup RX queue:"
2145 "No mempool allocation"
2146 " on the socket %d\n",
2150 diag = rte_eth_rx_queue_setup(pi, qi,
2151 port->nb_rx_desc[qi],
2153 &(port->rx_conf[qi]),
2159 /* Failed to set up RX queue; return */
2160 if (rte_atomic16_cmpset(&(port->port_status),
2162 RTE_PORT_STOPPED) == 0)
2163 printf("Port %d can not be set back "
2164 "to stopped\n", pi);
2165 printf("Fail to configure port %d rx queues\n",
2167 /* try to reconfigure queues next time */
2168 port->need_reconfig_queues = 1;
2172 configure_rxtx_dump_callbacks(verbose_level);
2174 if (rte_eth_dev_start(pi) < 0) {
2175 printf("Fail to start port %d\n", pi);
2177 /* Fail to setup rx queue, return */
2178 if (rte_atomic16_cmpset(&(port->port_status),
2179 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2180 printf("Port %d can not be set back to "
2185 if (rte_atomic16_cmpset(&(port->port_status),
2186 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2187 printf("Port %d can not be set into started\n", pi);
2189 rte_eth_macaddr_get(pi, &mac_addr);
2190 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2191 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2192 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2193 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2195 /* at least one port started; need to check link status */
2196 need_check_link_status = 1;
2199 if (need_check_link_status == 1 && !no_link_check)
2200 check_all_ports_link_status(RTE_PORT_ALL);
2201 else if (need_check_link_status == 0)
2202 printf("Please stop the ports first\n");
2209 stop_port(portid_t pid)
2212 struct rte_port *port;
2213 int need_check_link_status = 0;
2220 if (port_id_is_invalid(pid, ENABLED_WARN))
2223 printf("Stopping ports...\n");
2225 RTE_ETH_FOREACH_DEV(pi) {
2226 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2229 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2230 printf("Please remove port %d from forwarding configuration.\n", pi);
2234 if (port_is_bonding_slave(pi)) {
2235 printf("Please remove port %d from bonded device.\n", pi);
2240 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2241 RTE_PORT_HANDLING) == 0)
2244 rte_eth_dev_stop(pi);
2246 if (rte_atomic16_cmpset(&(port->port_status),
2247 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2248 printf("Port %d can not be set into stopped\n", pi);
2249 need_check_link_status = 1;
2251 if (need_check_link_status && !no_link_check)
2252 check_all_ports_link_status(RTE_PORT_ALL);
2258 remove_invalid_ports_in(portid_t *array, portid_t *total)
2261 portid_t new_total = 0;
2263 for (i = 0; i < *total; i++)
2264 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2265 array[new_total] = array[i];
2272 remove_invalid_ports(void)
2274 remove_invalid_ports_in(ports_ids, &nb_ports);
2275 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2276 nb_cfg_ports = nb_fwd_ports;
2280 close_port(portid_t pid)
2283 struct rte_port *port;
2285 if (port_id_is_invalid(pid, ENABLED_WARN))
2288 printf("Closing ports...\n");
2290 RTE_ETH_FOREACH_DEV(pi) {
2291 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2294 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2295 printf("Please remove port %d from forwarding configuration.\n", pi);
2299 if (port_is_bonding_slave(pi)) {
2300 printf("Please remove port %d from bonded device.\n", pi);
2305 if (rte_atomic16_cmpset(&(port->port_status),
2306 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2307 printf("Port %d is already closed\n", pi);
2311 if (rte_atomic16_cmpset(&(port->port_status),
2312 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2313 printf("Port %d is now not stopped\n", pi);
2317 if (port->flow_list)
2318 port_flow_flush(pi);
2319 rte_eth_dev_close(pi);
2321 remove_invalid_ports();
2323 if (rte_atomic16_cmpset(&(port->port_status),
2324 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2325 printf("Port %d cannot be set to closed\n", pi);
2332 reset_port(portid_t pid)
2336 struct rte_port *port;
2338 if (port_id_is_invalid(pid, ENABLED_WARN))
2341 printf("Resetting ports...\n");
2343 RTE_ETH_FOREACH_DEV(pi) {
2344 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2347 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2348 printf("Please remove port %d from forwarding "
2349 "configuration.\n", pi);
2353 if (port_is_bonding_slave(pi)) {
2354 printf("Please remove port %d from bonded device.\n",
2359 diag = rte_eth_dev_reset(pi);
2362 port->need_reconfig = 1;
2363 port->need_reconfig_queues = 1;
2365 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2373 attach_port(char *identifier)
2376 struct rte_dev_iterator iterator;
2378 printf("Attaching a new port...\n");
2380 if (identifier == NULL) {
2381 printf("Invalid parameters are specified\n");
2385 if (rte_dev_probe(identifier) != 0) {
2386 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2390 /* first attach mode: event */
2391 if (setup_on_probe_event) {
2392 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2393 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2394 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2395 ports[pi].need_setup != 0)
2396 setup_attached_port(pi);
2400 /* second attach mode: iterator */
2401 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2402 /* setup ports matching the devargs used for probing */
2403 if (port_is_forwarding(pi))
2404 continue; /* port was already attached before */
2405 setup_attached_port(pi);
2410 setup_attached_port(portid_t pi)
2412 unsigned int socket_id;
2414 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2415 /* if socket_id is invalid, set to the first available socket. */
2416 if (check_socket_id(socket_id) < 0)
2417 socket_id = socket_ids[0];
2418 reconfig(pi, socket_id);
2419 rte_eth_promiscuous_enable(pi);
2421 ports_ids[nb_ports++] = pi;
2422 fwd_ports_ids[nb_fwd_ports++] = pi;
2423 nb_cfg_ports = nb_fwd_ports;
2424 ports[pi].need_setup = 0;
2425 ports[pi].port_status = RTE_PORT_STOPPED;
2427 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
void
detach_port_device(portid_t port_id)
{
	struct rte_device *dev;
	portid_t sibling;

	printf("Removing a device...\n");

	dev = rte_eth_devices[port_id].device;
	if (dev == NULL) {
		printf("Device already removed\n");
		return;
	}

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			printf("Port not stopped\n");
			return;
		}
		printf("Port was not closed\n");
		if (ports[port_id].flow_list)
			port_flow_flush(port_id);
	}

	if (rte_dev_remove(dev) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}

	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		/* reset mapping between old ports and removed device */
		rte_eth_devices[sibling].device = NULL;
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			/* sibling ports are forced to be closed */
			ports[sibling].port_status = RTE_PORT_CLOSED;
			printf("Port %u is closed\n", sibling);
		}
	}

	remove_invalid_ports();

	printf("Device of port %u is detached\n", port_id);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
}
void
pmd_test_exit(void)
{
	struct rte_device *device;
	portid_t pt_id;
	int ret;
	int i;

	if (test_done == 0)
		stop_packet_forwarding();

	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		if (mempools[i] && mp_alloc_type == MP_ALLOC_ANON)
			rte_mempool_mem_iter(mempools[i], dma_unmap_cb, NULL);

	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);
			/*
			 * Workaround for a virtio-user issue: the clean-up
			 * routine must be called to remove existing
			 * virtio-user files. Valid only for testpmd; a fix
			 * valid for all applications is still needed.
			 * TODO: Implement proper resource cleanup.
			 */
			device = rte_eth_devices[pt_id].device;
			if (device && !strcmp(device->driver->name, "net_virtio_user"))
				detach_port_device(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL, "failed to stop device event monitor.");
			return;
		}
		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "failed to unregister device event callback.\n");
			return;
		}
		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL, "failed to disable hotplug handling.\n");
			return;
		}
	}
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		if (mempools[i])
			rte_mempool_free(mempools[i]);

	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them when done */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port%d Link Up. speed %u Mbps - %s\n",
					portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
 * This callback removes a single port of a device. It is limited: it cannot
 * handle the removal of several ports of one device at once.
 * TODO: the device detach invocation is planned to move from the application
 * into the EAL, and all PMDs should be converted to free port resources when
 * the ether device is closed.
 */
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	detach_port_device(port_id);
	if (need_to_start)
		start_packet_forwarding(0);
}
/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		  void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}
static int
register_eth_event_callback(void)
{
	int ret;
	enum rte_eth_event_type event;

	for (event = RTE_ETH_EVENT_UNKNOWN; event < RTE_ETH_EVENT_MAX; event++) {
		ret = rte_eth_dev_callback_register(RTE_ETH_ALL, event,
				eth_event_callback, NULL);
		if (ret != 0) {
			TESTPMD_LOG(ERR, "Failed to register callback for %s event\n",
					eth_event_desc[event]);
			return -1;
		}
	}
	return 0;
}
/* This function is used by the interrupt thread */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
		   __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * Because this callback is invoked from the EAL interrupt
		 * callback, it must return before it can be unregistered
		 * when the device is detached. So finish the callback
		 * quickly and detach the device via a deferred removal.
		 * This is a workaround: once device detaching moves into
		 * the EAL, the deferred removal can be dropped.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(DEBUG, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: once kernel driver binding is finished,
		 * begin to attach the port.
		 */
		break;
	default:
		break;
	}
}
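/*
 * Apply the user-supplied queue<->stats-counter mappings to a port.
 * Returns 0 on success, otherwise the rte_eth_dev_set_*_queue_stats_mapping()
 * error code (-ENOTSUP when the PMD has no such registers).
 */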
static int
set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}
static int
set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}
static void
map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	uint16_t qid;
	uint64_t offloads;

	for (qid = 0; qid < nb_rxq; qid++) {
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;
		port->rx_conf[qid].offloads |= offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;
		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		port->tx_conf[qid].offloads |= offloads;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
		port->nb_tx_desc[qid] = nb_txd;
	}
}
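/*
 * Baseline configuration of every probed port: RSS according to nb_rxq,
 * queue setup via rxtx_port_config(), stats mappings and optional
 * LSC/RMV interrupt flags.
 */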
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;
		rte_eth_dev_info_get(pid, &port->dev_info);
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
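/*
 * Track which ports act as bonding slaves; such ports are skipped by
 * close_port() and reset_port() until removed from the bonded device.
 */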
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	if ((rte_eth_devices[slave_pid].data->dev_flags &
	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
		return 1;
	return 0;
}
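/* VLAN IDs mapped onto VMDq pools in the DCB+VT configuration below. */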
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Build up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
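/*
 * Apply a DCB (optionally DCB+VT) configuration to one port and resize
 * nb_rxq/nb_txq accordingly. In interactive mode this is typically driven
 * by a command of the form "port config <port_id> dcb vt (on|off) <tcs>
 * pfc (on|off)" (exact syntax depends on the testpmd cmdline version).
 */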
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	port_conf.rxmode = rte_port->dev_conf.rxmode;
	port_conf.txmode = rte_port->dev_conf.txmode;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/* re-configure the device */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
	if (retval < 0)
		return retval;
	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
			pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
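/* Allocate the global ports[] array and reset the per-port NUMA hints. */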
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}

	/* Initialize ports NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}
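/* Periodic statistics display used when --stats-period is given. */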
static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}
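/* SIGINT/SIGTERM: tear down capture/latency stats and re-raise the signal. */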
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
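/*
 * Startup sequence: EAL init, testpmd argument parsing, port/config
 * initialisation, optional hotplug monitoring, then either the interactive
 * prompt or the stats/forwarding loop.
 */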
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_panic("Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_panic("Cannot register for ethdev events");
#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");
	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				"using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL, "failed to enable hotplug handling.");
			return -1;
		}
		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL, "failed to start device event monitoring.");
			return -1;
		}
		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL, "failed to register device event callback\n");
			return -1;
		}
	}
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init() returned error %d\n",
					ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}