1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc_heap.h>
31 #include <rte_memory.h>
32 #include <rte_memcpy.h>
33 #include <rte_launch.h>
35 #include <rte_alarm.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_atomic.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
50 #ifdef RTE_LIBRTE_IXGBE_PMD
51 #include <rte_pmd_ixgbe.h>
53 #ifdef RTE_LIBRTE_PDUMP
54 #include <rte_pdump.h>
57 #include <rte_metrics.h>
58 #ifdef RTE_LIBRTE_BITRATE
59 #include <rte_bitrate.h>
61 #ifdef RTE_LIBRTE_LATENCY_STATS
62 #include <rte_latencystats.h>
68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69 #define HUGE_FLAG (0x40000)
71 #define HUGE_FLAG MAP_HUGETLB
74 #ifndef MAP_HUGE_SHIFT
75 /* older kernels (or FreeBSD) will not have this define */
76 #define HUGE_SHIFT (26)
78 #define HUGE_SHIFT MAP_HUGE_SHIFT
81 #define EXTMEM_HEAP_NAME "extmem"
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* use master core for command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, the NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports among the CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
102 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
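/*
 * Illustrative note (a sketch, not a quote from the option parser): the
 * allocation type above is normally selected on the testpmd command line,
 * e.g. via --mp-alloc, parsed in parameters.c:
 *
 *   testpmd -l 0-3 -n 4 -- -i --mp-alloc=xmemhuge
 *
 * Double-check the exact option spelling for your DPDK version.
 */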
117 * Store the specified sockets on which the memory pools used by the ports are allocated.
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123 * Store the specified sockets on which the RX rings used by the ports are allocated.
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129 * Store the specified sockets on which the TX rings used by the ports are allocated.
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet addresses of peer target ports to which packets are forwarded.
137 * Must be instantiated with the Ethernet addresses of peer traffic generator ports.
140 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
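/*
 * Worked example (illustrative numbers): with 2 forwarding ports and
 * nb_rxq = 4, testpmd allocates 2 * 4 = 8 forwarding streams, one per
 * (port, RX queue) pair.
 */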
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
182 #if defined RTE_LIBRTE_PMD_SOFTNIC
185 #ifdef RTE_LIBRTE_IEEE1588
186 &ieee1588_fwd_engine,
191 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
192 uint16_t mempool_flags;
194 struct fwd_config cur_fwd_config;
195 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
196 uint32_t retry_enabled;
197 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
198 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
200 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
201 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
202 * specified on command-line. */
203 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
206 * In a container, a process running with the 'stats-period' option cannot be
207 * terminated. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
212 * Configuration of packet segments used by the "txonly" processing engine.
214 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
215 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
216 TXONLY_DEF_PACKET_LEN,
218 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
220 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
221 /**< Split policy for packets to TX. */
223 uint8_t txonly_multi_flow;
224 /**< Whether multiple flows are generated in TXONLY mode. */
226 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
227 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
229 /* whether the current configuration is in DCB mode; 0 means it is not */
230 uint8_t dcb_config = 0;
232 /* Whether DCB is in testing status */
233 uint8_t dcb_test = 0;
236 * Configurable number of RX/TX queues.
238 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
239 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
242 * Configurable number of RX/TX ring descriptors.
243 * Defaults are supplied by drivers via ethdev.
245 #define RTE_TEST_RX_DESC_DEFAULT 0
246 #define RTE_TEST_TX_DESC_DEFAULT 0
247 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
248 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
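/*
 * Note (behaviour as documented for ethdev queue setup; double-check your
 * DPDK version): passing 0 descriptors asks the PMD to use its own
 * preferred ring size, e.g.:
 *
 *   rte_eth_rx_queue_setup(pid, qid, 0, socket_id, &rx_conf, mb_pool);
 *
 * picks the driver default reported via rte_eth_dev_info_get().
 */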
250 #define RTE_PMD_PARAM_UNSET -1
252 * Configurable values of RX and TX ring threshold registers.
255 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
256 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
257 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
259 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
260 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
261 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
264 * Configurable value of RX free threshold.
266 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
269 * Configurable value of RX drop enable.
271 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
274 * Configurable value of TX free threshold.
276 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
279 * Configurable value of TX RS bit threshold.
281 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
284 * Configurable value of buffered packets before sending.
286 uint16_t noisy_tx_sw_bufsz;
289 * Configurable value of packet buffer timeout.
291 uint16_t noisy_tx_sw_buf_flush_time;
294 * Configurable value for size of VNF internal memory area
295 * used for simulating noisy neighbour behaviour
297 uint64_t noisy_lkup_mem_sz;
300 * Configurable value of number of random writes done in
301 * VNF simulation memory area.
303 uint64_t noisy_lkup_num_writes;
306 * Configurable value of number of random reads done in
307 * VNF simulation memory area.
309 uint64_t noisy_lkup_num_reads;
312 * Configurable value of number of random reads/writes done in
313 * VNF simulation memory area.
315 uint64_t noisy_lkup_num_reads_writes;
318 * Receive Side Scaling (RSS) configuration.
320 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
323 * Port topology configuration
325 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
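/*
 * Example of the paired topology (illustrative): with 4 forwarding ports,
 * streams are set up as 0 <-> 1 and 2 <-> 3, i.e. each even port is peered
 * with the next odd one.
 */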
328 * Avoid flushing all the RX streams before starting forwarding.
330 uint8_t no_flush_rx = 0; /* flush by default */
333 * Flow API isolated mode.
335 uint8_t flow_isolate_all;
338 * Avoid checking the link status when starting/stopping a port.
340 uint8_t no_link_check = 0; /* check by default */
343 * Enable link status change notification
345 uint8_t lsc_interrupt = 1; /* enabled by default */
348 * Enable device removal notification.
350 uint8_t rmv_interrupt = 1; /* enabled by default */
352 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
354 /* After attach, port setup is called on event or by iterator */
355 bool setup_on_probe_event = true;
357 /* Pretty printing of ethdev events */
358 static const char * const eth_event_desc[] = {
359 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
360 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
361 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
362 [RTE_ETH_EVENT_INTR_RESET] = "reset",
363 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
364 [RTE_ETH_EVENT_IPSEC] = "IPsec",
365 [RTE_ETH_EVENT_MACSEC] = "MACsec",
366 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
367 [RTE_ETH_EVENT_NEW] = "device probed",
368 [RTE_ETH_EVENT_DESTROY] = "device released",
369 [RTE_ETH_EVENT_MAX] = NULL,
373 * Display or mask ether events
374 * Default to all events except VF_MBOX
376 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
377 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
378 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
379 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
380 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
381 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
382 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
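/*
 * Illustrative use of the mask (a sketch, not a quote from the event
 * handler): an event of a given type is printed only when its bit is set:
 *
 *   if (event_print_mask & (UINT32_C(1) << type))
 *       printf("Port %u: %s event\n", port_id, eth_event_desc[type]);
 */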
384 * Decide if all memory is locked for performance.
389 * NIC bypass mode configuration options.
392 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
393 /* The NIC bypass watchdog timeout. */
394 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
398 #ifdef RTE_LIBRTE_LATENCY_STATS
401 * Set when latency stats are enabled on the command line
403 uint8_t latencystats_enabled;
406 * Lcore ID to serve latency statistics.
408 lcoreid_t latencystats_lcore_id = -1;
413 * Ethernet device configuration.
415 struct rte_eth_rxmode rx_mode = {
416 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
417 /**< Default maximum frame length. */
420 struct rte_eth_txmode tx_mode = {
421 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
424 struct rte_fdir_conf fdir_conf = {
425 .mode = RTE_FDIR_MODE_NONE,
426 .pballoc = RTE_FDIR_PBALLOC_64K,
427 .status = RTE_FDIR_REPORT_STATUS,
429 .vlan_tci_mask = 0xFFEF,
431 .src_ip = 0xFFFFFFFF,
432 .dst_ip = 0xFFFFFFFF,
435 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
436 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
438 .src_port_mask = 0xFFFF,
439 .dst_port_mask = 0xFFFF,
440 .mac_addr_byte_mask = 0xFF,
441 .tunnel_type_mask = 1,
442 .tunnel_id_mask = 0xFFFFFFFF,
447 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
449 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
450 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
452 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
453 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
455 uint16_t nb_tx_queue_stats_mappings = 0;
456 uint16_t nb_rx_queue_stats_mappings = 0;
459 * Display zero values by default for xstats
461 uint8_t xstats_hide_zero;
463 unsigned int num_sockets = 0;
464 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
466 #ifdef RTE_LIBRTE_BITRATE
467 /* Bitrate statistics */
468 struct rte_stats_bitrates *bitrate_data;
469 lcoreid_t bitrate_lcore_id;
470 uint8_t bitrate_enabled;
473 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
474 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
476 struct vxlan_encap_conf vxlan_encap_conf = {
480 .vni = "\x00\x00\x00",
482 .udp_dst = RTE_BE16(4789),
483 .ipv4_src = IPv4(127, 0, 0, 1),
484 .ipv4_dst = IPv4(255, 255, 255, 255),
485 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
486 "\x00\x00\x00\x00\x00\x00\x00\x01",
487 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
488 "\x00\x00\x00\x00\x00\x00\x11\x11",
492 .eth_src = "\x00\x00\x00\x00\x00\x00",
493 .eth_dst = "\xff\xff\xff\xff\xff\xff",
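/*
 * Notes on the defaults above: 4789 is the IANA-assigned VXLAN UDP port.
 * The all-zero source and all-ones destination MAC addresses are
 * placeholders, expected to be overridden at runtime (e.g. via testpmd's
 * "set vxlan" command, if available in your version) before building
 * encapsulation headers.
 */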
496 struct nvgre_encap_conf nvgre_encap_conf = {
499 .tni = "\x00\x00\x00",
500 .ipv4_src = IPv4(127, 0, 0, 1),
501 .ipv4_dst = IPv4(255, 255, 255, 255),
502 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
503 "\x00\x00\x00\x00\x00\x00\x00\x01",
504 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
505 "\x00\x00\x00\x00\x00\x00\x11\x11",
507 .eth_src = "\x00\x00\x00\x00\x00\x00",
508 .eth_dst = "\xff\xff\xff\xff\xff\xff",
511 /* Forward function declarations */
512 static void setup_attached_port(portid_t pi);
513 static void map_port_queue_stats_mapping_registers(portid_t pi,
514 struct rte_port *port);
515 static void check_all_ports_link_status(uint32_t port_mask);
516 static int eth_event_callback(portid_t port_id,
517 enum rte_eth_event_type type,
518 void *param, void *ret_param);
519 static void dev_event_callback(const char *device_name,
520 enum rte_dev_event_type type,
524 * Check if all the ports are started.
525 * If yes, return positive value. If not, return zero.
527 static int all_ports_started(void);
529 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
530 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
533 * Helper function to check whether a socket id has not been discovered yet.
534 * If the socket id is new, return a positive value; if it is already known, return zero.
537 new_socket_id(unsigned int socket_id)
541 for (i = 0; i < num_sockets; i++) {
542 if (socket_ids[i] == socket_id)
549 * Setup default configuration.
552 set_default_fwd_lcores_config(void)
556 unsigned int sock_num;
559 for (i = 0; i < RTE_MAX_LCORE; i++) {
560 if (!rte_lcore_is_enabled(i))
562 sock_num = rte_lcore_to_socket_id(i);
563 if (new_socket_id(sock_num)) {
564 if (num_sockets >= RTE_MAX_NUMA_NODES) {
565 rte_exit(EXIT_FAILURE,
566 "Total sockets greater than %u\n",
569 socket_ids[num_sockets++] = sock_num;
571 if (i == rte_get_master_lcore())
573 fwd_lcores_cpuids[nb_lc++] = i;
575 nb_lcores = (lcoreid_t) nb_lc;
576 nb_cfg_lcores = nb_lcores;
581 set_def_peer_eth_addrs(void)
585 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
586 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
587 peer_eth_addrs[i].addr_bytes[5] = i;
592 set_default_fwd_ports_config(void)
597 RTE_ETH_FOREACH_DEV(pt_id) {
598 fwd_ports_ids[i++] = pt_id;
600 /* Update sockets info according to the attached device */
601 int socket_id = rte_eth_dev_socket_id(pt_id);
602 if (socket_id >= 0 && new_socket_id(socket_id)) {
603 if (num_sockets >= RTE_MAX_NUMA_NODES) {
604 rte_exit(EXIT_FAILURE,
605 "Total sockets greater than %u\n",
608 socket_ids[num_sockets++] = socket_id;
612 nb_cfg_ports = nb_ports;
613 nb_fwd_ports = nb_ports;
617 set_def_fwd_config(void)
619 set_default_fwd_lcores_config();
620 set_def_peer_eth_addrs();
621 set_default_fwd_ports_config();
624 /* extremely pessimistic estimation of memory required to create a mempool */
626 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
628 unsigned int n_pages, mbuf_per_pg, leftover;
629 uint64_t total_mem, mbuf_mem, obj_sz;
631 /* there is no good way to predict how much space the mempool will
632 * occupy because it will allocate chunks on the fly, and some of those
633 * will come from default DPDK memory while some will come from our
634 * external memory, so just assume 128MB will be enough for everyone.
636 uint64_t hdr_mem = 128 << 20;
638 /* account for possible non-contiguousness */
639 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
641 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
645 mbuf_per_pg = pgsz / obj_sz;
646 leftover = (nb_mbufs % mbuf_per_pg) > 0;
647 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
649 mbuf_mem = n_pages * pgsz;
651 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
653 if (total_mem > SIZE_MAX) {
654 TESTPMD_LOG(ERR, "Memory size too big\n");
657 *out = (size_t)total_mem;
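/*
 * Worked example for the estimate above (illustrative numbers; obj_sz is an
 * assumed value, not a fixed constant): with 2 MiB pages (pgsz = 2097152)
 * and, say, obj_sz = 2688 bytes, mbuf_per_pg = 780. For nb_mbufs = 131072
 * that gives n_pages = 131072 / 780 + 1 = 169, so mbuf_mem = 338 MiB and
 * total_mem = RTE_ALIGN(128 MiB + 338 MiB, 2 MiB) = 466 MiB.
 */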
663 pagesz_flags(uint64_t page_sz)
665 /* as per the mmap() manpage, huge page sizes are encoded as the log2
666 * of the page size shifted by MAP_HUGE_SHIFT
668 int log2 = rte_log2_u64(page_sz);
670 return (log2 << HUGE_SHIFT);
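/*
 * Example: for 2 MiB pages, rte_log2_u64(1 << 21) = 21, so this returns
 * 21 << MAP_HUGE_SHIFT, which matches the kernel's MAP_HUGE_2MB definition.
 */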
674 alloc_mem(size_t memsz, size_t pgsz, bool huge)
679 /* allocate anonymous hugepages */
680 flags = MAP_ANONYMOUS | MAP_PRIVATE;
682 flags |= HUGE_FLAG | pagesz_flags(pgsz);
684 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
685 if (addr == MAP_FAILED)
691 struct extmem_param {
695 rte_iova_t *iova_table;
696 unsigned int iova_table_len;
700 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
703 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
704 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
705 unsigned int cur_page, n_pages, pgsz_idx;
706 size_t mem_sz, cur_pgsz;
707 rte_iova_t *iovas = NULL;
711 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
712 /* skip anything that is too big */
713 if (pgsizes[pgsz_idx] > SIZE_MAX)
716 cur_pgsz = pgsizes[pgsz_idx];
718 /* if we were told not to allocate hugepages, override */
720 cur_pgsz = sysconf(_SC_PAGESIZE);
722 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
724 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
728 /* allocate our memory */
729 addr = alloc_mem(mem_sz, cur_pgsz, huge);
731 /* if we couldn't allocate memory with the specified page size,
732 * that doesn't mean we can't do it with other page sizes, so try another one.
738 /* store IOVA addresses for every page in this memory area */
739 n_pages = mem_sz / cur_pgsz;
741 iovas = malloc(sizeof(*iovas) * n_pages);
744 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
747 /* lock memory if it's not huge pages */
751 /* populate IOVA addresses */
752 for (cur_page = 0; cur_page < n_pages; cur_page++) {
757 offset = cur_pgsz * cur_page;
758 cur = RTE_PTR_ADD(addr, offset);
760 /* touch the page before getting its IOVA */
761 *(volatile char *)cur = 0;
763 iova = rte_mem_virt2iova(cur);
765 iovas[cur_page] = iova;
770 /* if we couldn't allocate anything */
776 param->pgsz = cur_pgsz;
777 param->iova_table = iovas;
778 param->iova_table_len = n_pages;
785 munmap(addr, mem_sz);
791 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
793 struct extmem_param param;
796 memset(&param, 0, sizeof(param));
798 /* check if our heap exists */
799 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
801 /* create our heap */
802 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
804 TESTPMD_LOG(ERR, "Cannot create heap\n");
809 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
811 TESTPMD_LOG(ERR, "Cannot create memory area\n");
815 /* we now have a valid memory area, so add it to heap */
816 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
817 param.addr, param.len, param.iova_table,
818 param.iova_table_len, param.pgsz);
820 /* when using VFIO, memory is automatically mapped for DMA by EAL */
822 /* not needed any more */
823 free(param.iova_table);
826 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
827 munmap(param.addr, param.len);
833 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
839 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
840 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
845 RTE_ETH_FOREACH_DEV(pid) {
846 struct rte_eth_dev *dev =
847 &rte_eth_devices[pid];
849 ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
853 "unable to DMA unmap addr 0x%p "
855 memhdr->addr, dev->data->name);
858 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
861 "unable to un-register addr 0x%p\n", memhdr->addr);
866 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
867 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
870 size_t page_size = sysconf(_SC_PAGESIZE);
873 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
877 "unable to register addr 0x%p\n", memhdr->addr);
880 RTE_ETH_FOREACH_DEV(pid) {
881 struct rte_eth_dev *dev =
882 &rte_eth_devices[pid];
884 ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
888 "unable to DMA map addr 0x%p "
890 memhdr->addr, dev->data->name);
896 * Configuration initialisation done once at init time.
898 static struct rte_mempool *
899 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
900 unsigned int socket_id)
902 char pool_name[RTE_MEMPOOL_NAMESIZE];
903 struct rte_mempool *rte_mp = NULL;
906 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
907 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
910 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
911 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
913 switch (mp_alloc_type) {
914 case MP_ALLOC_NATIVE:
916 /* wrapper to rte_mempool_create() */
917 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
918 rte_mbuf_best_mempool_ops());
919 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
920 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
925 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
926 mb_size, (unsigned int) mb_mempool_cache,
927 sizeof(struct rte_pktmbuf_pool_private),
928 socket_id, mempool_flags);
932 if (rte_mempool_populate_anon(rte_mp) == 0) {
933 rte_mempool_free(rte_mp);
937 rte_pktmbuf_pool_init(rte_mp, NULL);
938 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
939 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
943 case MP_ALLOC_XMEM_HUGE:
946 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
948 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
949 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
952 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
954 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
956 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
957 rte_mbuf_best_mempool_ops());
958 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
959 mb_mempool_cache, 0, mbuf_seg_size,
965 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
970 if (rte_mp == NULL) {
971 rte_exit(EXIT_FAILURE,
972 "Creation of mbuf pool for socket %u failed: %s\n",
973 socket_id, rte_strerror(rte_errno));
974 } else if (verbose_level > 0) {
975 rte_mempool_dump(stdout, rte_mp);
981 * Check whether a given socket id is valid in NUMA mode;
982 * if valid, return 0, else return -1.
985 check_socket_id(const unsigned int socket_id)
987 static int warning_once = 0;
989 if (new_socket_id(socket_id)) {
990 if (!warning_once && numa_support)
991 printf("Warning: NUMA should be configured manually by"
992 " using --port-numa-config and"
993 " --ring-numa-config parameters along with"
1002 * Get the allowed maximum number of RX queues.
1003 * *pid returns the port id which has the minimal value of
1004 * max_rx_queues among all ports.
1007 get_allowed_max_nb_rxq(portid_t *pid)
1009 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
1011 struct rte_eth_dev_info dev_info;
1013 RTE_ETH_FOREACH_DEV(pi) {
1014 rte_eth_dev_info_get(pi, &dev_info);
1015 if (dev_info.max_rx_queues < allowed_max_rxq) {
1016 allowed_max_rxq = dev_info.max_rx_queues;
1020 return allowed_max_rxq;
1024 * Check whether the input rxq is valid.
1025 * If the input rxq is not greater than the maximum number
1026 * of RX queues of any port, it is valid.
1027 * If valid, return 0, else return -1.
1030 check_nb_rxq(queueid_t rxq)
1032 queueid_t allowed_max_rxq;
1035 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1036 if (rxq > allowed_max_rxq) {
1037 printf("Fail: input rxq (%u) can't be greater "
1038 "than max_rx_queues (%u) of port %u\n",
1048 * Get the allowed maximum number of TX queues.
1049 * *pid returns the port id which has the minimal value of
1050 * max_tx_queues among all ports.
1053 get_allowed_max_nb_txq(portid_t *pid)
1055 queueid_t allowed_max_txq = MAX_QUEUE_ID;
1057 struct rte_eth_dev_info dev_info;
1059 RTE_ETH_FOREACH_DEV(pi) {
1060 rte_eth_dev_info_get(pi, &dev_info);
1061 if (dev_info.max_tx_queues < allowed_max_txq) {
1062 allowed_max_txq = dev_info.max_tx_queues;
1066 return allowed_max_txq;
1070 * Check whether the input txq is valid.
1071 * If the input txq is not greater than the maximum number
1072 * of TX queues of any port, it is valid.
1073 * If valid, return 0, else return -1.
1076 check_nb_txq(queueid_t txq)
1078 queueid_t allowed_max_txq;
1081 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1082 if (txq > allowed_max_txq) {
1083 printf("Fail: input txq (%u) can't be greater "
1084 "than max_tx_queues (%u) of port %u\n",
1097 struct rte_port *port;
1098 struct rte_mempool *mbp;
1099 unsigned int nb_mbuf_per_pool;
1101 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1102 struct rte_gro_param gro_param;
1106 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1108 /* Configuration of logical cores. */
1109 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1110 sizeof(struct fwd_lcore *) * nb_lcores,
1111 RTE_CACHE_LINE_SIZE);
1112 if (fwd_lcores == NULL) {
1113 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1114 "failed\n", nb_lcores);
1116 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1117 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1118 sizeof(struct fwd_lcore),
1119 RTE_CACHE_LINE_SIZE);
1120 if (fwd_lcores[lc_id] == NULL) {
1121 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1124 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1127 RTE_ETH_FOREACH_DEV(pid) {
1129 /* Apply default TxRx configuration for all ports */
1130 port->dev_conf.txmode = tx_mode;
1131 port->dev_conf.rxmode = rx_mode;
1132 rte_eth_dev_info_get(pid, &port->dev_info);
1134 if (!(port->dev_info.tx_offload_capa &
1135 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1136 port->dev_conf.txmode.offloads &=
1137 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1138 if (!(port->dev_info.tx_offload_capa &
1139 DEV_TX_OFFLOAD_MATCH_METADATA))
1140 port->dev_conf.txmode.offloads &=
1141 ~DEV_TX_OFFLOAD_MATCH_METADATA;
1143 if (port_numa[pid] != NUMA_NO_CONFIG)
1144 port_per_socket[port_numa[pid]]++;
1146 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1149 * if socket_id is invalid,
1150 * set to the first available socket.
1152 if (check_socket_id(socket_id) < 0)
1153 socket_id = socket_ids[0];
1154 port_per_socket[socket_id]++;
1158 /* Apply Rx offloads configuration */
1159 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1160 port->rx_conf[k].offloads =
1161 port->dev_conf.rxmode.offloads;
1162 /* Apply Tx offloads configuration */
1163 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1164 port->tx_conf[k].offloads =
1165 port->dev_conf.txmode.offloads;
1167 /* set flag to initialize port/queue */
1168 port->need_reconfig = 1;
1169 port->need_reconfig_queues = 1;
1170 port->tx_metadata = 0;
1174 * Create mbuf pools.
1175 * If NUMA support is disabled, create a single mbuf pool in
1176 * socket 0 memory by default.
1177 * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
1179 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
1180 * nb_txd can be re-configured at run time.
1182 if (param_total_num_mbufs)
1183 nb_mbuf_per_pool = param_total_num_mbufs;
1185 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1186 (nb_lcores * mb_mempool_cache) +
1187 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1188 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1194 for (i = 0; i < num_sockets; i++)
1195 mempools[i] = mbuf_pool_create(mbuf_data_size,
1199 if (socket_num == UMA_NO_CONFIG)
1200 mempools[0] = mbuf_pool_create(mbuf_data_size,
1201 nb_mbuf_per_pool, 0);
1203 mempools[socket_num] = mbuf_pool_create
1211 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1212 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1214 * Record which mbuf pool each logical core uses, if needed.
1216 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1217 mbp = mbuf_pool_find(
1218 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1221 mbp = mbuf_pool_find(0);
1222 fwd_lcores[lc_id]->mbp = mbp;
1223 /* initialize GSO context */
1224 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1225 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1226 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1227 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1229 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1232 /* Configuration of packet forwarding streams. */
1233 if (init_fwd_streams() < 0)
1234 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1238 /* create a gro context for each lcore */
1239 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1240 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1241 gro_param.max_item_per_flow = MAX_PKT_BURST;
1242 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1243 gro_param.socket_id = rte_lcore_to_socket_id(
1244 fwd_lcores_cpuids[lc_id]);
1245 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1246 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1247 rte_exit(EXIT_FAILURE,
1248 "rte_gro_ctx_create() failed\n");
1252 #if defined RTE_LIBRTE_PMD_SOFTNIC
1253 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1254 RTE_ETH_FOREACH_DEV(pid) {
1256 const char *driver = port->dev_info.driver_name;
1258 if (strcmp(driver, "net_softnic") == 0)
1259 port->softport.fwd_lcore_arg = fwd_lcores;
1268 reconfig(portid_t new_port_id, unsigned socket_id)
1270 struct rte_port *port;
1272 /* Reconfiguration of Ethernet ports. */
1273 port = &ports[new_port_id];
1274 rte_eth_dev_info_get(new_port_id, &port->dev_info);
1276 /* set flag to initialize port/queue */
1277 port->need_reconfig = 1;
1278 port->need_reconfig_queues = 1;
1279 port->socket_id = socket_id;
1286 init_fwd_streams(void)
1289 struct rte_port *port;
1290 streamid_t sm_id, nb_fwd_streams_new;
1293 /* set socket id according to NUMA mode */
1294 RTE_ETH_FOREACH_DEV(pid) {
1296 if (nb_rxq > port->dev_info.max_rx_queues) {
1297 printf("Fail: nb_rxq(%d) is greater than "
1298 "max_rx_queues(%d)\n", nb_rxq,
1299 port->dev_info.max_rx_queues);
1302 if (nb_txq > port->dev_info.max_tx_queues) {
1303 printf("Fail: nb_txq(%d) is greater than "
1304 "max_tx_queues(%d)\n", nb_txq,
1305 port->dev_info.max_tx_queues);
1309 if (port_numa[pid] != NUMA_NO_CONFIG)
1310 port->socket_id = port_numa[pid];
1312 port->socket_id = rte_eth_dev_socket_id(pid);
1315 * if socket_id is invalid,
1316 * set to the first available socket.
1318 if (check_socket_id(port->socket_id) < 0)
1319 port->socket_id = socket_ids[0];
1323 if (socket_num == UMA_NO_CONFIG)
1324 port->socket_id = 0;
1326 port->socket_id = socket_num;
1330 q = RTE_MAX(nb_rxq, nb_txq);
1332 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1335 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1336 if (nb_fwd_streams_new == nb_fwd_streams)
1339 if (fwd_streams != NULL) {
1340 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1341 if (fwd_streams[sm_id] == NULL)
1343 rte_free(fwd_streams[sm_id]);
1344 fwd_streams[sm_id] = NULL;
1346 rte_free(fwd_streams);
1351 nb_fwd_streams = nb_fwd_streams_new;
1352 if (nb_fwd_streams) {
1353 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1354 sizeof(struct fwd_stream *) * nb_fwd_streams,
1355 RTE_CACHE_LINE_SIZE);
1356 if (fwd_streams == NULL)
1357 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1358 " (struct fwd_stream *)) failed\n",
1361 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1362 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1363 " struct fwd_stream", sizeof(struct fwd_stream),
1364 RTE_CACHE_LINE_SIZE);
1365 if (fwd_streams[sm_id] == NULL)
1366 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1367 "(struct fwd_stream) failed\n");
1374 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1376 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1378 unsigned int total_burst;
1379 unsigned int nb_burst;
1380 unsigned int burst_stats[3];
1381 uint16_t pktnb_stats[3];
1383 int burst_percent[3];
1386 * First compute the total number of packet bursts and the
1387 * two highest numbers of bursts of the same number of packets.
1390 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1391 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1392 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1393 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1396 total_burst += nb_burst;
1397 if (nb_burst > burst_stats[0]) {
1398 burst_stats[1] = burst_stats[0];
1399 pktnb_stats[1] = pktnb_stats[0];
1400 burst_stats[0] = nb_burst;
1401 pktnb_stats[0] = nb_pkt;
1402 } else if (nb_burst > burst_stats[1]) {
1403 burst_stats[1] = nb_burst;
1404 pktnb_stats[1] = nb_pkt;
1407 if (total_burst == 0)
1409 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1410 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1411 burst_percent[0], (int) pktnb_stats[0]);
1412 if (burst_stats[0] == total_burst) {
1416 if (burst_stats[0] + burst_stats[1] == total_burst) {
1417 printf(" + %d%% of %d pkts]\n",
1418 100 - burst_percent[0], pktnb_stats[1]);
1421 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1422 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1423 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1424 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1427 printf(" + %d%% of %d pkts + %d%% of others]\n",
1428 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1430 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1433 fwd_stream_stats_display(streamid_t stream_id)
1435 struct fwd_stream *fs;
1436 static const char *fwd_top_stats_border = "-------";
1438 fs = fwd_streams[stream_id];
1439 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1440 (fs->fwd_dropped == 0))
1442 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1443 "TX Port=%2d/Queue=%2d %s\n",
1444 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1445 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1446 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1447 " TX-dropped: %-14"PRIu64,
1448 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1450 /* if checksum mode */
1451 if (cur_fwd_eng == &csum_fwd_engine) {
1452 printf(" RX- bad IP checksum: %-14"PRIu64
1453 " Rx- bad L4 checksum: %-14"PRIu64
1454 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1455 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1456 fs->rx_bad_outer_l4_csum);
1461 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1462 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1463 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1468 fwd_stats_display(void)
1470 static const char *fwd_stats_border = "----------------------";
1471 static const char *acc_stats_border = "+++++++++++++++";
1473 struct fwd_stream *rx_stream;
1474 struct fwd_stream *tx_stream;
1475 uint64_t tx_dropped;
1476 uint64_t rx_bad_ip_csum;
1477 uint64_t rx_bad_l4_csum;
1478 uint64_t rx_bad_outer_l4_csum;
1479 } ports_stats[RTE_MAX_ETHPORTS];
1480 uint64_t total_rx_dropped = 0;
1481 uint64_t total_tx_dropped = 0;
1482 uint64_t total_rx_nombuf = 0;
1483 struct rte_eth_stats stats;
1484 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1485 uint64_t fwd_cycles = 0;
1487 uint64_t total_recv = 0;
1488 uint64_t total_xmit = 0;
1489 struct rte_port *port;
1494 memset(ports_stats, 0, sizeof(ports_stats));
1496 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1497 struct fwd_stream *fs = fwd_streams[sm_id];
1499 if (cur_fwd_config.nb_fwd_streams >
1500 cur_fwd_config.nb_fwd_ports) {
1501 fwd_stream_stats_display(sm_id);
1503 ports_stats[fs->tx_port].tx_stream = fs;
1504 ports_stats[fs->rx_port].rx_stream = fs;
1507 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1509 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1510 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1511 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1512 fs->rx_bad_outer_l4_csum;
1514 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1515 fwd_cycles += fs->core_cycles;
1518 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1521 pt_id = fwd_ports_ids[i];
1522 port = &ports[pt_id];
1524 rte_eth_stats_get(pt_id, &stats);
1525 stats.ipackets -= port->stats.ipackets;
1526 stats.opackets -= port->stats.opackets;
1527 stats.ibytes -= port->stats.ibytes;
1528 stats.obytes -= port->stats.obytes;
1529 stats.imissed -= port->stats.imissed;
1530 stats.oerrors -= port->stats.oerrors;
1531 stats.rx_nombuf -= port->stats.rx_nombuf;
1533 total_recv += stats.ipackets;
1534 total_xmit += stats.opackets;
1535 total_rx_dropped += stats.imissed;
1536 total_tx_dropped += ports_stats[pt_id].tx_dropped;
1537 total_tx_dropped += stats.oerrors;
1538 total_rx_nombuf += stats.rx_nombuf;
1540 printf("\n %s Forward statistics for port %-2d %s\n",
1541 fwd_stats_border, pt_id, fwd_stats_border);
1543 if (!port->rx_queue_stats_mapping_enabled &&
1544 !port->tx_queue_stats_mapping_enabled) {
1545 printf(" RX-packets: %-14"PRIu64
1546 " RX-dropped: %-14"PRIu64
1547 "RX-total: %-"PRIu64"\n",
1548 stats.ipackets, stats.imissed,
1549 stats.ipackets + stats.imissed);
1551 if (cur_fwd_eng == &csum_fwd_engine)
1552 printf(" Bad-ipcsum: %-14"PRIu64
1553 " Bad-l4csum: %-14"PRIu64
1554 "Bad-outer-l4csum: %-14"PRIu64"\n",
1555 ports_stats[pt_id].rx_bad_ip_csum,
1556 ports_stats[pt_id].rx_bad_l4_csum,
1557 ports_stats[pt_id].rx_bad_outer_l4_csum);
1558 if (stats.ierrors + stats.rx_nombuf > 0) {
1559 printf(" RX-error: %-"PRIu64"\n",
1561 printf(" RX-nombufs: %-14"PRIu64"\n",
1565 printf(" TX-packets: %-14"PRIu64
1566 " TX-dropped: %-14"PRIu64
1567 "TX-total: %-"PRIu64"\n",
1568 stats.opackets, ports_stats[pt_id].tx_dropped,
1569 stats.opackets + ports_stats[pt_id].tx_dropped);
1571 printf(" RX-packets: %14"PRIu64
1572 " RX-dropped:%14"PRIu64
1573 " RX-total:%14"PRIu64"\n",
1574 stats.ipackets, stats.imissed,
1575 stats.ipackets + stats.imissed);
1577 if (cur_fwd_eng == &csum_fwd_engine)
1578 printf(" Bad-ipcsum:%14"PRIu64
1579 " Bad-l4csum:%14"PRIu64
1580 " Bad-outer-l4csum: %-14"PRIu64"\n",
1581 ports_stats[pt_id].rx_bad_ip_csum,
1582 ports_stats[pt_id].rx_bad_l4_csum,
1583 ports_stats[pt_id].rx_bad_outer_l4_csum);
1584 if ((stats.ierrors + stats.rx_nombuf) > 0) {
1585 printf(" RX-error:%"PRIu64"\n", stats.ierrors);
1586 printf(" RX-nombufs: %14"PRIu64"\n",
1590 printf(" TX-packets: %14"PRIu64
1591 " TX-dropped:%14"PRIu64
1592 " TX-total:%14"PRIu64"\n",
1593 stats.opackets, ports_stats[pt_id].tx_dropped,
1594 stats.opackets + ports_stats[pt_id].tx_dropped);
1597 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1598 if (ports_stats[pt_id].rx_stream)
1599 pkt_burst_stats_display("RX",
1600 &ports_stats[pt_id].rx_stream->rx_burst_stats);
1601 if (ports_stats[pt_id].tx_stream)
1602 pkt_burst_stats_display("TX",
1603 &ports_stats[pt_id].tx_stream->tx_burst_stats);
1606 if (port->rx_queue_stats_mapping_enabled) {
1608 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1609 printf(" Stats reg %2d RX-packets:%14"PRIu64
1610 " RX-errors:%14"PRIu64
1611 " RX-bytes:%14"PRIu64"\n",
1612 j, stats.q_ipackets[j],
1613 stats.q_errors[j], stats.q_ibytes[j]);
1617 if (port->tx_queue_stats_mapping_enabled) {
1618 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1619 printf(" Stats reg %2d TX-packets:%14"PRIu64
1622 j, stats.q_opackets[j],
1627 printf(" %s--------------------------------%s\n",
1628 fwd_stats_border, fwd_stats_border);
1631 printf("\n %s Accumulated forward statistics for all ports"
1633 acc_stats_border, acc_stats_border);
1634 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1636 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1638 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1639 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1640 if (total_rx_nombuf > 0)
1641 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1642 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1644 acc_stats_border, acc_stats_border);
1645 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1647 printf("\n CPU cycles/packet=%u (total cycles="
1648 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1649 (unsigned int)(fwd_cycles / total_recv),
1650 fwd_cycles, total_recv);
1655 fwd_stats_reset(void)
1661 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1662 pt_id = fwd_ports_ids[i];
1663 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
1665 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1666 struct fwd_stream *fs = fwd_streams[sm_id];
1670 fs->fwd_dropped = 0;
1671 fs->rx_bad_ip_csum = 0;
1672 fs->rx_bad_l4_csum = 0;
1673 fs->rx_bad_outer_l4_csum = 0;
1675 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1676 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
1677 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
1679 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1680 fs->core_cycles = 0;
1686 flush_fwd_rx_queues(void)
1688 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1695 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1696 uint64_t timer_period;
1698 /* convert to number of cycles */
1699 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1701 for (j = 0; j < 2; j++) {
1702 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1703 for (rxq = 0; rxq < nb_rxq; rxq++) {
1704 port_id = fwd_ports_ids[rxp];
1706 * testpmd can get stuck in the do-while loop below
1707 * if rte_eth_rx_burst() always returns nonzero
1708 * packets, so a timer is added to exit this loop
1709 * after the 1-second timer expires.
1711 prev_tsc = rte_rdtsc();
1713 nb_rx = rte_eth_rx_burst(port_id, rxq,
1714 pkts_burst, MAX_PKT_BURST);
1715 for (i = 0; i < nb_rx; i++)
1716 rte_pktmbuf_free(pkts_burst[i]);
1718 cur_tsc = rte_rdtsc();
1719 diff_tsc = cur_tsc - prev_tsc;
1720 timer_tsc += diff_tsc;
1721 } while ((nb_rx > 0) &&
1722 (timer_tsc < timer_period));
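/*
 * Sketch of the timing logic above: timer_tsc accumulates TSC deltas per
 * iteration; with rte_get_timer_hz() returning e.g. 2.0e9 on a 2 GHz TSC,
 * the loop bails out after ~2e9 accumulated cycles (~1 second) even if
 * packets keep arriving.
 */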
1726 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1731 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1733 struct fwd_stream **fsm;
1736 #ifdef RTE_LIBRTE_BITRATE
1737 uint64_t tics_per_1sec;
1738 uint64_t tics_datum;
1739 uint64_t tics_current;
1740 uint16_t i, cnt_ports;
1742 cnt_ports = nb_ports;
1743 tics_datum = rte_rdtsc();
1744 tics_per_1sec = rte_get_timer_hz();
1746 fsm = &fwd_streams[fc->stream_idx];
1747 nb_fs = fc->stream_nb;
1749 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1750 (*pkt_fwd)(fsm[sm_id]);
1751 #ifdef RTE_LIBRTE_BITRATE
1752 if (bitrate_enabled != 0 &&
1753 bitrate_lcore_id == rte_lcore_id()) {
1754 tics_current = rte_rdtsc();
1755 if (tics_current - tics_datum >= tics_per_1sec) {
1756 /* Periodic bitrate calculation */
1757 for (i = 0; i < cnt_ports; i++)
1758 rte_stats_bitrate_calc(bitrate_data,
1760 tics_datum = tics_current;
1764 #ifdef RTE_LIBRTE_LATENCY_STATS
1765 if (latencystats_enabled != 0 &&
1766 latencystats_lcore_id == rte_lcore_id())
1767 rte_latencystats_update();
1770 } while (! fc->stopped);
1774 start_pkt_forward_on_core(void *fwd_arg)
1776 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1777 cur_fwd_config.fwd_eng->packet_fwd);
1782 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1783 * Used to start communication flows in network loopback test configurations.
1786 run_one_txonly_burst_on_core(void *fwd_arg)
1788 struct fwd_lcore *fwd_lc;
1789 struct fwd_lcore tmp_lcore;
1791 fwd_lc = (struct fwd_lcore *) fwd_arg;
1792 tmp_lcore = *fwd_lc;
1793 tmp_lcore.stopped = 1;
1794 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1799 * Launch packet forwarding:
1800 * - Setup per-port forwarding context.
1801 * - launch logical cores with their forwarding configuration.
1804 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1806 port_fwd_begin_t port_fwd_begin;
1811 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1812 if (port_fwd_begin != NULL) {
1813 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1814 (*port_fwd_begin)(fwd_ports_ids[i]);
1816 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1817 lc_id = fwd_lcores_cpuids[i];
1818 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1819 fwd_lcores[i]->stopped = 0;
1820 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1821 fwd_lcores[i], lc_id);
1823 printf("launch lcore %u failed - diag=%d\n",
1830 * Launch packet forwarding configuration.
1833 start_packet_forwarding(int with_tx_first)
1835 port_fwd_begin_t port_fwd_begin;
1836 port_fwd_end_t port_fwd_end;
1837 struct rte_port *port;
1841 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1842 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1844 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1845 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1847 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1848 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1849 (!nb_rxq || !nb_txq))
1850 rte_exit(EXIT_FAILURE,
1851 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1852 cur_fwd_eng->fwd_mode_name);
1854 if (all_ports_started() == 0) {
1855 printf("Not all ports were started\n");
1858 if (test_done == 0) {
1859 printf("Packet forwarding already started\n");
1865 for (i = 0; i < nb_fwd_ports; i++) {
1866 pt_id = fwd_ports_ids[i];
1867 port = &ports[pt_id];
1868 if (!port->dcb_flag) {
1869 printf("In DCB mode, all forwarding ports must "
1870 "be configured in this mode.\n");
1874 if (nb_fwd_lcores == 1) {
1875 printf("In DCB mode,the nb forwarding cores "
1876 "should be larger than 1.\n");
1885 flush_fwd_rx_queues();
1887 pkt_fwd_config_display(&cur_fwd_config);
1888 rxtx_config_display();
1891 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1892 pt_id = fwd_ports_ids[i];
1893 port = &ports[pt_id];
1894 map_port_queue_stats_mapping_registers(pt_id, port);
1896 if (with_tx_first) {
1897 port_fwd_begin = tx_only_engine.port_fwd_begin;
1898 if (port_fwd_begin != NULL) {
1899 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1900 (*port_fwd_begin)(fwd_ports_ids[i]);
1902 while (with_tx_first--) {
1903 launch_packet_forwarding(
1904 run_one_txonly_burst_on_core);
1905 rte_eal_mp_wait_lcore();
1907 port_fwd_end = tx_only_engine.port_fwd_end;
1908 if (port_fwd_end != NULL) {
1909 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1910 (*port_fwd_end)(fwd_ports_ids[i]);
1913 launch_packet_forwarding(start_pkt_forward_on_core);
1917 stop_packet_forwarding(void)
1919 port_fwd_end_t port_fwd_end;
1925 printf("Packet forwarding not started\n");
1928 printf("Telling cores to stop...");
1929 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1930 fwd_lcores[lc_id]->stopped = 1;
1931 printf("\nWaiting for lcores to finish...\n");
1932 rte_eal_mp_wait_lcore();
1933 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1934 if (port_fwd_end != NULL) {
1935 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1936 pt_id = fwd_ports_ids[i];
1937 (*port_fwd_end)(pt_id);
1941 fwd_stats_display();
1943 printf("\nDone.\n");
1948 dev_set_link_up(portid_t pid)
1950 if (rte_eth_dev_set_link_up(pid) < 0)
1951 printf("\nSet link up fail.\n");
1955 dev_set_link_down(portid_t pid)
1957 if (rte_eth_dev_set_link_down(pid) < 0)
1958 printf("\nSet link down fail.\n");
1962 all_ports_started(void)
1965 struct rte_port *port;
1967 RTE_ETH_FOREACH_DEV(pi) {
1969 /* Check if there is a port which is not started */
1970 if ((port->port_status != RTE_PORT_STARTED) &&
1971 (port->slave_flag == 0))
1975 /* All ports are started */
1980 port_is_stopped(portid_t port_id)
1982 struct rte_port *port = &ports[port_id];
1984 if ((port->port_status != RTE_PORT_STOPPED) &&
1985 (port->slave_flag == 0))
1991 all_ports_stopped(void)
1995 RTE_ETH_FOREACH_DEV(pi) {
1996 if (!port_is_stopped(pi))
2004 port_is_started(portid_t port_id)
2006 if (port_id_is_invalid(port_id, ENABLED_WARN))
2009 if (ports[port_id].port_status != RTE_PORT_STARTED)
2016 start_port(portid_t pid)
2018 int diag, need_check_link_status = -1;
2021 struct rte_port *port;
2022 struct rte_ether_addr mac_addr;
2024 if (port_id_is_invalid(pid, ENABLED_WARN))
2029 RTE_ETH_FOREACH_DEV(pi) {
2030 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2033 need_check_link_status = 0;
2035 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2036 RTE_PORT_HANDLING) == 0) {
2037 printf("Port %d is now not stopped\n", pi);
2041 if (port->need_reconfig > 0) {
2042 port->need_reconfig = 0;
2044 if (flow_isolate_all) {
2045 int ret = port_flow_isolate(pi, 1);
2047 printf("Failed to apply isolated"
2048 " mode on port %d\n", pi);
2052 configure_rxtx_dump_callbacks(0);
2053 printf("Configuring Port %d (socket %u)\n", pi,
2055 /* configure port */
2056 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
2059 if (rte_atomic16_cmpset(&(port->port_status),
2060 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2061 printf("Port %d can not be set back "
2062 "to stopped\n", pi);
2063 printf("Fail to configure port %d\n", pi);
2064 /* try to reconfigure port next time */
2065 port->need_reconfig = 1;
2069 if (port->need_reconfig_queues > 0) {
2070 port->need_reconfig_queues = 0;
2071 /* setup tx queues */
2072 for (qi = 0; qi < nb_txq; qi++) {
2073 if ((numa_support) &&
2074 (txring_numa[pi] != NUMA_NO_CONFIG))
2075 diag = rte_eth_tx_queue_setup(pi, qi,
2076 port->nb_tx_desc[qi],
2078 &(port->tx_conf[qi]));
2080 diag = rte_eth_tx_queue_setup(pi, qi,
2081 port->nb_tx_desc[qi],
2083 &(port->tx_conf[qi]));
2088 /* Failed to setup tx queues, return */
2089 if (rte_atomic16_cmpset(&(port->port_status),
2091 RTE_PORT_STOPPED) == 0)
2092 printf("Port %d can not be set back "
2093 "to stopped\n", pi);
2094 printf("Fail to configure port %d tx queues\n",
2096 /* try to reconfigure queues next time */
2097 port->need_reconfig_queues = 1;
2100 for (qi = 0; qi < nb_rxq; qi++) {
2101 /* setup rx queues */
2102 if ((numa_support) &&
2103 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2104 struct rte_mempool * mp =
2105 mbuf_pool_find(rxring_numa[pi]);
2107 printf("Failed to setup RX queue:"
2108 "No mempool allocation"
2109 " on the socket %d\n",
2114 diag = rte_eth_rx_queue_setup(pi, qi,
2115 port->nb_rx_desc[qi],
2117 &(port->rx_conf[qi]),
2120 struct rte_mempool *mp =
2121 mbuf_pool_find(port->socket_id);
2123 printf("Failed to setup RX queue:"
2124 "No mempool allocation"
2125 " on the socket %d\n",
2129 diag = rte_eth_rx_queue_setup(pi, qi,
2130 port->nb_rx_desc[qi],
2132 &(port->rx_conf[qi]),
2138 /* Failed to setup rx queues, return */
2139 if (rte_atomic16_cmpset(&(port->port_status),
2141 RTE_PORT_STOPPED) == 0)
2142 printf("Port %d can not be set back "
2143 "to stopped\n", pi);
2144 printf("Fail to configure port %d rx queues\n",
2146 /* try to reconfigure queues next time */
2147 port->need_reconfig_queues = 1;
2151 configure_rxtx_dump_callbacks(verbose_level);
2153 if (rte_eth_dev_start(pi) < 0) {
2154 printf("Fail to start port %d\n", pi);
2156 /* Fail to setup rx queue, return */
2157 if (rte_atomic16_cmpset(&(port->port_status),
2158 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2159 printf("Port %d can not be set back to "
2164 if (rte_atomic16_cmpset(&(port->port_status),
2165 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2166 printf("Port %d can not be set into started\n", pi);
2168 rte_eth_macaddr_get(pi, &mac_addr);
2169 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2170 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2171 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2172 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2174 /* at least one port started, need to check link status */
2175 need_check_link_status = 1;
2178 if (need_check_link_status == 1 && !no_link_check)
2179 check_all_ports_link_status(RTE_PORT_ALL);
2180 else if (need_check_link_status == 0)
2181 printf("Please stop the ports first\n");
2188 stop_port(portid_t pid)
2191 struct rte_port *port;
2192 int need_check_link_status = 0;
2199 if (port_id_is_invalid(pid, ENABLED_WARN))
2202 printf("Stopping ports...\n");
2204 RTE_ETH_FOREACH_DEV(pi) {
2205 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2208 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2209 printf("Please remove port %d from forwarding configuration.\n", pi);
2213 if (port_is_bonding_slave(pi)) {
2214 printf("Please remove port %d from bonded device.\n", pi);
2219 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2220 RTE_PORT_HANDLING) == 0)
2223 rte_eth_dev_stop(pi);
2225 if (rte_atomic16_cmpset(&(port->port_status),
2226 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2227 printf("Port %d can not be set into stopped\n", pi);
2228 need_check_link_status = 1;
2230 if (need_check_link_status && !no_link_check)
2231 check_all_ports_link_status(RTE_PORT_ALL);
2237 remove_invalid_ports_in(portid_t *array, portid_t *total)
2240 portid_t new_total = 0;
2242 for (i = 0; i < *total; i++)
2243 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2244 array[new_total] = array[i];
2251 remove_invalid_ports(void)
2253 remove_invalid_ports_in(ports_ids, &nb_ports);
2254 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2255 nb_cfg_ports = nb_fwd_ports;
2259 close_port(portid_t pid)
2262 struct rte_port *port;
2264 if (port_id_is_invalid(pid, ENABLED_WARN))
2267 printf("Closing ports...\n");
2269 RTE_ETH_FOREACH_DEV(pi) {
2270 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2273 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2274 printf("Please remove port %d from forwarding configuration.\n", pi);
2278 if (port_is_bonding_slave(pi)) {
2279 printf("Please remove port %d from bonded device.\n", pi);
2284 if (rte_atomic16_cmpset(&(port->port_status),
2285 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2286 printf("Port %d is already closed\n", pi);
2290 if (rte_atomic16_cmpset(&(port->port_status),
2291 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2292 printf("Port %d is now not stopped\n", pi);
2296 if (port->flow_list)
2297 port_flow_flush(pi);
2298 rte_eth_dev_close(pi);
2300 remove_invalid_ports();
2302 if (rte_atomic16_cmpset(&(port->port_status),
2303 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2304 printf("Port %d cannot be set to closed\n", pi);
2311 reset_port(portid_t pid)
2315 struct rte_port *port;
2317 if (port_id_is_invalid(pid, ENABLED_WARN))
2320 printf("Resetting ports...\n");
2322 RTE_ETH_FOREACH_DEV(pi) {
2323 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2326 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2327 printf("Please remove port %d from forwarding "
2328 "configuration.\n", pi);
2332 if (port_is_bonding_slave(pi)) {
2333 printf("Please remove port %d from bonded device.\n",
2338 diag = rte_eth_dev_reset(pi);
2341 port->need_reconfig = 1;
2342 port->need_reconfig_queues = 1;
2344 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2352 attach_port(char *identifier)
2355 struct rte_dev_iterator iterator;
2357 printf("Attaching a new port...\n");
2359 if (identifier == NULL) {
2360 printf("Invalid parameters are specified\n");
2364 if (rte_dev_probe(identifier) != 0) {
2365 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2369 /* first attach mode: event */
2370 if (setup_on_probe_event) {
2371 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2372 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2373 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2374 ports[pi].need_setup != 0)
2375 setup_attached_port(pi);
2379 /* second attach mode: iterator */
2380 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2381 /* setup ports matching the devargs used for probing */
2382 if (port_is_forwarding(pi))
2383 continue; /* port was already attached before */
2384 setup_attached_port(pi);
2389 setup_attached_port(portid_t pi)
2391 unsigned int socket_id;
2393 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2394 /* if socket_id is invalid, set to the first available socket. */
2395 if (check_socket_id(socket_id) < 0)
2396 socket_id = socket_ids[0];
2397 reconfig(pi, socket_id);
2398 rte_eth_promiscuous_enable(pi);
2400 ports_ids[nb_ports++] = pi;
2401 fwd_ports_ids[nb_fwd_ports++] = pi;
2402 nb_cfg_ports = nb_fwd_ports;
2403 ports[pi].need_setup = 0;
2404 ports[pi].port_status = RTE_PORT_STOPPED;
2406 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
2411 detach_port_device(portid_t port_id)
2413 struct rte_device *dev;
2416 printf("Removing a device...\n");
2418 dev = rte_eth_devices[port_id].device;
2420 printf("Device already removed\n");
2424 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2425 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2426 printf("Port not stopped\n");
2429 printf("Port was not closed\n");
2430 if (ports[port_id].flow_list)
2431 port_flow_flush(port_id);
2434 if (rte_dev_remove(dev) != 0) {
2435 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2438 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2439 /* reset mapping between old ports and removed device */
2440 rte_eth_devices[sibling].device = NULL;
2441 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2442 /* sibling ports are forced to be closed */
2443 ports[sibling].port_status = RTE_PORT_CLOSED;
2444 printf("Port %u is closed\n", sibling);
2448 remove_invalid_ports();
2450 printf("Device of port %u is detached\n", port_id);
2451 printf("Now total ports is %d\n", nb_ports);
2459 struct rte_device *device;
2465 stop_packet_forwarding();
2467 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2469 if (mp_alloc_type == MP_ALLOC_ANON)
2470 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
2474 if (ports != NULL) {
2476 RTE_ETH_FOREACH_DEV(pt_id) {
2477 printf("\nStopping port %d...\n", pt_id);
2481 RTE_ETH_FOREACH_DEV(pt_id) {
2482 printf("\nShutting down port %d...\n", pt_id);
2487 * This is a workaround to fix a virtio-user issue that
2488 * requires to call clean-up routine to remove existing
2490 * This workaround is valid only for testpmd; a fix valid for
2491 * all applications is still needed.
2492 * TODO: Implement proper resource cleanup
2494 device = rte_eth_devices[pt_id].device;
2495 if (device && !strcmp(device->driver->name, "net_virtio_user"))
2496 detach_port_device(pt_id);
2501 ret = rte_dev_event_monitor_stop();
2504 "fail to stop device event monitor.");
2508 ret = rte_dev_event_callback_unregister(NULL,
2509 dev_event_callback, NULL);
2512 "fail to unregister device event callback.\n");
2516 ret = rte_dev_hotplug_handle_disable();
2519 "fail to disable hotplug handling.\n");
2523 for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
2525 rte_mempool_free(mempools[i]);
2528 printf("\nBye...\n");
2531 typedef void (*cmd_func_t)(void);
2532 struct pmd_test_command {
2533 const char *cmd_name;
2534 cmd_func_t cmd_func;
2537 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2539 /* Check the link status of all ports for up to 9 s, then print the final status of each port */
2541 check_all_ports_link_status(uint32_t port_mask)
2543 #define CHECK_INTERVAL 100 /* 100ms */
2544 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2546 uint8_t count, all_ports_up, print_flag = 0;
2547 struct rte_eth_link link;
2549 printf("Checking link statuses...\n");
2551 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2553 RTE_ETH_FOREACH_DEV(portid) {
2554 if ((port_mask & (1 << portid)) == 0)
2556 memset(&link, 0, sizeof(link));
2557 rte_eth_link_get_nowait(portid, &link);
2558 /* print link status if flag set */
2559 if (print_flag == 1) {
2560 if (link.link_status)
2562 "Port%d Link Up. speed %u Mbps- %s\n",
2563 portid, link.link_speed,
2564 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2565 ("full-duplex") : ("half-duplex\n"));
2567 printf("Port %d Link Down\n", portid);
2570 /* clear all_ports_up flag if any link down */
2571 if (link.link_status == ETH_LINK_DOWN) {
2576 /* after finally printing all link status, get out */
2577 if (print_flag == 1)
2580 if (all_ports_up == 0) {
2582 rte_delay_ms(CHECK_INTERVAL);
2585 /* set the print_flag if all ports up or timeout */
2586 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
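/*
 * A minimal sketch of the same non-blocking poll for a single port.
 * The retry budget is an illustrative assumption; CHECK_INTERVAL is
 * reused from the function above.
 */
static int
example_wait_link_up(uint16_t port_id, int max_tries)
{
	struct rte_eth_link link;

	while (max_tries-- > 0) {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(port_id, &link);
		if (link.link_status == ETH_LINK_UP)
			return 0;
		rte_delay_ms(CHECK_INTERVAL);
	}
	return -1; /* timed out */
}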
2596 * This callback removes a single port of a device. It is limited in that
2597 * it cannot handle removal of multiple ports of one device at once.
2598 * TODO: the device detach invocation is planned to move from the user side
2599 * into the EAL, and all PMDs should free port resources on ethernet device closing.
2602 rmv_port_callback(void *arg)
2604 int need_to_start = 0;
2605 int org_no_link_check = no_link_check;
2606 portid_t port_id = (intptr_t)arg;
2608 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2610 if (!test_done && port_is_forwarding(port_id)) {
2612 stop_packet_forwarding();
2616 no_link_check = org_no_link_check;
2617 close_port(port_id);
2618 detach_port_device(port_id);
2620 start_packet_forwarding(0);
2623 /* This function is used by the interrupt thread */
2625 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2628 RTE_SET_USED(param);
2629 RTE_SET_USED(ret_param);
2631 if (type >= RTE_ETH_EVENT_MAX) {
2632 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2633 port_id, __func__, type);
2635 } else if (event_print_mask & (UINT32_C(1) << type)) {
2636 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2637 eth_event_desc[type]);
2642 case RTE_ETH_EVENT_NEW:
2643 ports[port_id].need_setup = 1;
2644 ports[port_id].port_status = RTE_PORT_HANDLING;
2646 case RTE_ETH_EVENT_INTR_RMV:
2647 if (port_id_is_invalid(port_id, DISABLED_WARN))
2649 if (rte_eal_alarm_set(100000,
2650 rmv_port_callback, (void *)(intptr_t)port_id))
2651 fprintf(stderr, "Could not set up deferred device removal\n");
2660 register_eth_event_callback(void)
2663 enum rte_eth_event_type event;
2665 for (event = RTE_ETH_EVENT_UNKNOWN;
2666 event < RTE_ETH_EVENT_MAX; event++) {
2667 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
2672 TESTPMD_LOG(ERR, "Failed to register callback for "
2673 "%s event\n", eth_event_desc[event]);
2681 /* This function is used by the interrupt thread */
2683 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2684 __rte_unused void *arg)
2689 if (type >= RTE_DEV_EVENT_MAX) {
2690 fprintf(stderr, "%s called upon invalid event %d\n",
2696 case RTE_DEV_EVENT_REMOVE:
2697 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
2699 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2701 RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
2706 * Because the user's callback is invoked from the EAL interrupt
2707 * callback, the interrupt callback needs to finish before it can
2708 * be unregistered when detaching the device. So finish the
2709 * callback quickly and use a deferred removal to detach the
2710 * device instead. This is a workaround; once device detaching is
2711 * moved into the EAL in the future, the deferred removal could
2714 if (rte_eal_alarm_set(100000,
2715 rmv_port_callback, (void *)(intptr_t)port_id))
2717 "Could not set up deferred device removal\n");
2719 case RTE_DEV_EVENT_ADD:
2720 RTE_LOG(DEBUG, EAL, "The device: %s has been added!\n",
2722 /* TODO: After the kernel driver binding finishes,
2723 * begin to attach the port.
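/*
 * Sketch of the deferral pattern described above: work that must not
 * run in the interrupt thread is pushed to an EAL alarm. The 100 ms
 * delay matches the callbacks above; the helper itself is hypothetical.
 */
static void
example_deferred_detach(void *arg)
{
	portid_t port_id = (intptr_t)arg;

	close_port(port_id);
	detach_port_device(port_id);
}

/* from interrupt context:
 * rte_eal_alarm_set(100000, example_deferred_detach,
 *		(void *)(intptr_t)port_id);
 */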
2732 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2736 uint8_t mapping_found = 0;
2738 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2739 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2740 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2741 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2742 tx_queue_stats_mappings[i].queue_id,
2743 tx_queue_stats_mappings[i].stats_counter_id);
2750 port->tx_queue_stats_mapping_enabled = 1;
2755 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2759 uint8_t mapping_found = 0;
2761 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2762 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2763 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2764 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2765 rx_queue_stats_mappings[i].queue_id,
2766 rx_queue_stats_mappings[i].stats_counter_id);
2773 port->rx_queue_stats_mapping_enabled = 1;
2778 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2782 diag = set_tx_queue_stats_mapping_registers(pi, port);
2784 if (diag == -ENOTSUP) {
2785 port->tx_queue_stats_mapping_enabled = 0;
2786 printf("TX queue stats mapping not supported port id=%d\n", pi);
2789 rte_exit(EXIT_FAILURE,
2790 "set_tx_queue_stats_mapping_registers "
2791 "failed for port id=%d diag=%d\n",
2795 diag = set_rx_queue_stats_mapping_registers(pi, port);
2797 if (diag == -ENOTSUP) {
2798 port->rx_queue_stats_mapping_enabled = 0;
2799 printf("RX queue stats mapping not supported port id=%d\n", pi);
2802 rte_exit(EXIT_FAILURE,
2803 "set_rx_queue_stats_mapping_registers "
2804 "failed for port id=%d diag=%d\n",
2810 rxtx_port_config(struct rte_port *port)
2815 for (qid = 0; qid < nb_rxq; qid++) {
2816 offloads = port->rx_conf[qid].offloads;
2817 port->rx_conf[qid] = port->dev_info.default_rxconf;
2818 port->rx_conf[qid].offloads |= offloads;
2820 /* Check if any Rx parameters have been passed */
2821 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2822 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2824 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2825 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2827 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2828 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2830 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2831 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2833 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2834 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2836 port->nb_rx_desc[qid] = nb_rxd;
2839 for (qid = 0; qid < nb_txq; qid++) {
2840 offloads = port->tx_conf[qid].offloads;
2841 port->tx_conf[qid] = port->dev_info.default_txconf;
2842 port->tx_conf[qid].offloads |= offloads;
2844 /* Check if any Tx parameters have been passed */
2845 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2846 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2848 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2849 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2851 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2852 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2854 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2855 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2857 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2858 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2860 port->nb_tx_desc[qid] = nb_txd;
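/*
 * Sketch of the "start from PMD defaults, re-apply requested offloads"
 * pattern above, applied to a single RX queue outside testpmd. Queue id
 * and descriptor count are illustrative assumptions.
 */
static int
example_rxq_setup(uint16_t pid, const struct rte_eth_dev_info *dev_info,
		uint64_t offloads, struct rte_mempool *mb_pool)
{
	struct rte_eth_rxconf rxconf = dev_info->default_rxconf;

	rxconf.offloads |= offloads; /* keep the per-queue offloads */
	return rte_eth_rx_queue_setup(pid, 0, 512,
			rte_eth_dev_socket_id(pid), &rxconf, mb_pool);
}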
2865 init_port_config(void)
2868 struct rte_port *port;
2870 RTE_ETH_FOREACH_DEV(pid) {
2872 port->dev_conf.fdir_conf = fdir_conf;
2873 rte_eth_dev_info_get(pid, &port->dev_info);
2875 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2876 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2877 rss_hf & port->dev_info.flow_type_rss_offloads;
2879 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2880 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2883 if (port->dcb_flag == 0) {
2884 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2885 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2887 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2890 rxtx_port_config(port);
2892 rte_eth_macaddr_get(pid, &port->eth_addr);
2894 map_port_queue_stats_mapping_registers(pid, port);
2895 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2896 rte_pmd_ixgbe_bypass_init(pid);
2899 if (lsc_interrupt &&
2900 (rte_eth_devices[pid].data->dev_flags &
2901 RTE_ETH_DEV_INTR_LSC))
2902 port->dev_conf.intr_conf.lsc = 1;
2903 if (rmv_interrupt &&
2904 (rte_eth_devices[pid].data->dev_flags &
2905 RTE_ETH_DEV_INTR_RMV))
2906 port->dev_conf.intr_conf.rmv = 1;
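/*
 * Minimal sketch of the RSS decision made in init_port_config():
 * enable RSS multi-queue RX only when the device reports usable hash
 * offloads. The ETH_RSS_IP request is an illustrative assumption.
 */
static void
example_choose_mq_mode(struct rte_eth_conf *conf,
		const struct rte_eth_dev_info *dev_info)
{
	uint64_t hf = ETH_RSS_IP & dev_info->flow_type_rss_offloads;

	conf->rx_adv_conf.rss_conf.rss_key = NULL;
	conf->rx_adv_conf.rss_conf.rss_hf = hf;
	conf->rxmode.mq_mode = hf ? ETH_MQ_RX_RSS : ETH_MQ_RX_NONE;
}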
2910 void set_port_slave_flag(portid_t slave_pid)
2912 struct rte_port *port;
2914 port = &ports[slave_pid];
2915 port->slave_flag = 1;
2918 void clear_port_slave_flag(portid_t slave_pid)
2920 struct rte_port *port;
2922 port = &ports[slave_pid];
2923 port->slave_flag = 0;
2926 uint8_t port_is_bonding_slave(portid_t slave_pid)
2928 struct rte_port *port;
2930 port = &ports[slave_pid];
2931 if ((rte_eth_devices[slave_pid].data->dev_flags &
2932 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2937 const uint16_t vlan_tags[] = {
2938 0, 1, 2, 3, 4, 5, 6, 7,
2939 8, 9, 10, 11, 12, 13, 14, 15,
2940 16, 17, 18, 19, 20, 21, 22, 23,
2941 24, 25, 26, 27, 28, 29, 30, 31
2945 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2946 enum dcb_mode_enable dcb_mode,
2947 enum rte_eth_nb_tcs num_tcs,
2952 struct rte_eth_rss_conf rss_conf;
2955 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2956 * given above, and the number of traffic classes available for use.
2958 if (dcb_mode == DCB_VT_ENABLED) {
2959 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2960 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2961 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2962 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2964 /* VMDQ+DCB RX and TX configurations */
2965 vmdq_rx_conf->enable_default_pool = 0;
2966 vmdq_rx_conf->default_pool = 0;
2967 vmdq_rx_conf->nb_queue_pools =
2968 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2969 vmdq_tx_conf->nb_queue_pools =
2970 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2972 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2973 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2974 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2975 vmdq_rx_conf->pool_map[i].pools =
2976 1 << (i % vmdq_rx_conf->nb_queue_pools);
2978 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2979 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2980 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2983 /* set DCB mode of RX and TX of multiple queues */
2984 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2985 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2987 struct rte_eth_dcb_rx_conf *rx_conf =
2988 &eth_conf->rx_adv_conf.dcb_rx_conf;
2989 struct rte_eth_dcb_tx_conf *tx_conf =
2990 &eth_conf->tx_adv_conf.dcb_tx_conf;
2992 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2996 rx_conf->nb_tcs = num_tcs;
2997 tx_conf->nb_tcs = num_tcs;
2999 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3000 rx_conf->dcb_tc[i] = i % num_tcs;
3001 tx_conf->dcb_tc[i] = i % num_tcs;
3004 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
3005 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3006 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3010 eth_conf->dcb_capability_en =
3011 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3013 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
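/*
 * Worked example of the i % num_tcs mapping used above: with
 * num_tcs == ETH_4_TCS, user priorities 0..7 map to traffic classes
 * 0,1,2,3,0,1,2,3. A hypothetical helper making that explicit:
 */
static uint8_t
example_prio_to_tc(unsigned int prio, enum rte_eth_nb_tcs num_tcs)
{
	return (uint8_t)(prio % num_tcs);
}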
3019 init_port_dcb_config(portid_t pid,
3020 enum dcb_mode_enable dcb_mode,
3021 enum rte_eth_nb_tcs num_tcs,
3024 struct rte_eth_conf port_conf;
3025 struct rte_port *rte_port;
3029 rte_port = &ports[pid];
3031 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3032 /* Enter DCB configuration status */
3035 port_conf.rxmode = rte_port->dev_conf.rxmode;
3036 port_conf.txmode = rte_port->dev_conf.txmode;
3038 /* set configuration of DCB in VT mode and DCB in non-VT mode */
3039 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3042 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3044 /* re-configure the device. */
3045 retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
3048 rte_eth_dev_info_get(pid, &rte_port->dev_info);
3050 /* If dev_info.vmdq_pool_base is greater than 0,
3051 * the queue ids of the vmdq pools start after the pf queues.
3053 if (dcb_mode == DCB_VT_ENABLED &&
3054 rte_port->dev_info.vmdq_pool_base > 0) {
3055 printf("VMDQ_DCB multi-queue mode is nonsensical"
3056 " for port %d.", pid);
3060 /* Assume all ports in testpmd have the same DCB capability
3061 * and the same number of rxq and txq in DCB mode
3063 if (dcb_mode == DCB_VT_ENABLED) {
3064 if (rte_port->dev_info.max_vfs > 0) {
3065 nb_rxq = rte_port->dev_info.nb_rx_queues;
3066 nb_txq = rte_port->dev_info.nb_tx_queues;
3068 nb_rxq = rte_port->dev_info.max_rx_queues;
3069 nb_txq = rte_port->dev_info.max_tx_queues;
3072 /* if VT is disabled, use all PF queues */
3073 if (rte_port->dev_info.vmdq_pool_base == 0) {
3074 nb_rxq = rte_port->dev_info.max_rx_queues;
3075 nb_txq = rte_port->dev_info.max_tx_queues;
3077 nb_rxq = (queueid_t)num_tcs;
3078 nb_txq = (queueid_t)num_tcs;
3082 rx_free_thresh = 64;
3084 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3086 rxtx_port_config(rte_port);
3088 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3089 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3090 rx_vft_set(pid, vlan_tags[i], 1);
3092 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
3093 map_port_queue_stats_mapping_registers(pid, rte_port);
3095 rte_port->dcb_flag = 1;
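/*
 * Hypothetical call sequence for the function above: put a stopped
 * port into 4-TC DCB mode with priority flow control on, then restart
 * it. The parameter choices are illustrative.
 */
static void
example_enable_dcb(portid_t pid)
{
	rte_eth_dev_stop(pid); /* the port must be stopped first */
	if (init_port_dcb_config(pid, DCB_ENABLED, ETH_4_TCS, 1) != 0)
		return;
	rte_eth_dev_start(pid);
}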
3103 /* Configuration of Ethernet ports. */
3104 ports = rte_zmalloc("testpmd: ports",
3105 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3106 RTE_CACHE_LINE_SIZE);
3107 if (ports == NULL) {
3108 rte_exit(EXIT_FAILURE,
3109 "rte_zmalloc(%d struct rte_port) failed\n",
3113 /* Initialize ports NUMA structures */
3114 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3115 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3116 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3130 const char clr[] = { 27, '[', '2', 'J', '\0' };
3131 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3133 /* Clear screen and move to top left */
3134 printf("%s%s", clr, top_left);
3136 printf("\nPort statistics ====================================");
3137 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3138 nic_stats_display(fwd_ports_ids[i]);
3144 signal_handler(int signum)
3146 if (signum == SIGINT || signum == SIGTERM) {
3147 printf("\nSignal %d received, preparing to exit...\n",
3149 #ifdef RTE_LIBRTE_PDUMP
3150 /* uninitialize packet capture framework */
3153 #ifdef RTE_LIBRTE_LATENCY_STATS
3154 rte_latencystats_uninit();
3157 /* Set flag to indicate forced termination. */
3159 /* exit with the expected status */
3160 signal(signum, SIG_DFL);
3161 kill(getpid(), signum);
3166 main(int argc, char** argv)
3173 signal(SIGINT, signal_handler);
3174 signal(SIGTERM, signal_handler);
3176 diag = rte_eal_init(argc, argv);
3178 rte_panic("Cannot init EAL\n");
3180 testpmd_logtype = rte_log_register("testpmd");
3181 if (testpmd_logtype < 0)
3182 rte_panic("Cannot register log type");
3183 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3185 ret = register_eth_event_callback();
3187 rte_panic("Cannot register for ethdev events");
3189 #ifdef RTE_LIBRTE_PDUMP
3190 /* initialize packet capture framework */
3195 RTE_ETH_FOREACH_DEV(port_id) {
3196 ports_ids[count] = port_id;
3199 nb_ports = (portid_t) count;
3201 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3203 /* allocate port structures, and init them */
3206 set_def_fwd_config();
3208 rte_panic("Empty set of forwarding logical cores - check the "
3209 "core mask supplied in the command parameters\n");
3211 /* Bitrate/latency stats disabled by default */
3212 #ifdef RTE_LIBRTE_BITRATE
3213 bitrate_enabled = 0;
3215 #ifdef RTE_LIBRTE_LATENCY_STATS
3216 latencystats_enabled = 0;
3219 /* on FreeBSD, mlockall() is disabled by default */
3220 #ifdef RTE_EXEC_ENV_FREEBSD
3229 launch_args_parse(argc, argv);
3231 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3232 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3236 if (tx_first && interactive)
3237 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3238 "interactive mode.\n");
3240 if (tx_first && lsc_interrupt) {
3241 printf("Warning: lsc_interrupt needs to be off when "
3242 " using tx_first. Disabling.\n");
3246 if (!nb_rxq && !nb_txq)
3247 printf("Warning: Either rx or tx queues should be non-zero\n");
3249 if (nb_rxq > 1 && nb_rxq > nb_txq)
3250 printf("Warning: nb_rxq=%d enables RSS configuration, "
3251 "but nb_txq=%d will prevent to fully test it.\n",
3257 ret = rte_dev_hotplug_handle_enable();
3260 "fail to enable hotplug handling.");
3264 ret = rte_dev_event_monitor_start();
3267 "fail to start device event monitoring.");
3271 ret = rte_dev_event_callback_register(NULL,
3272 dev_event_callback, NULL);
3275 "fail to register device event callback\n");
3280 if (start_port(RTE_PORT_ALL) != 0)
3281 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3283 /* set all ports to promiscuous mode by default */
3284 RTE_ETH_FOREACH_DEV(port_id)
3285 rte_eth_promiscuous_enable(port_id);
3287 /* Init metrics library */
3288 rte_metrics_init(rte_socket_id());
3290 #ifdef RTE_LIBRTE_LATENCY_STATS
3291 if (latencystats_enabled != 0) {
3292 int ret = rte_latencystats_init(1, NULL);
3294 printf("Warning: latencystats init()"
3295 " returned error %d\n", ret);
3296 printf("Latencystats running on lcore %d\n",
3297 latencystats_lcore_id);
3301 /* Setup bitrate stats */
3302 #ifdef RTE_LIBRTE_BITRATE
3303 if (bitrate_enabled != 0) {
3304 bitrate_data = rte_stats_bitrate_create();
3305 if (bitrate_data == NULL)
3306 rte_exit(EXIT_FAILURE,
3307 "Could not allocate bitrate data.\n");
3308 rte_stats_bitrate_reg(bitrate_data);
3312 #ifdef RTE_LIBRTE_CMDLINE
3313 if (strlen(cmdline_filename) != 0)
3314 cmdline_read_from_file(cmdline_filename);
3316 if (interactive == 1) {
3318 printf("Start automatic packet forwarding\n");
3319 start_packet_forwarding(0);
3331 printf("No commandline core given, start packet forwarding\n");
3332 start_packet_forwarding(tx_first);
3333 if (stats_period != 0) {
3334 uint64_t prev_time = 0, cur_time, diff_time = 0;
3335 uint64_t timer_period;
3337 /* Convert to number of cycles */
3338 timer_period = stats_period * rte_get_timer_hz();
3340 while (f_quit == 0) {
3341 cur_time = rte_get_timer_cycles();
3342 diff_time += cur_time - prev_time;
3344 if (diff_time >= timer_period) {
3346 /* Reset the timer */
3349 /* Sleep to avoid unnecessary checks */
3350 prev_time = cur_time;
3355 printf("Press enter to exit\n");
3356 rc = read(0, &c, 1);