1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc_heap.h>
31 #include <rte_memory.h>
32 #include <rte_memcpy.h>
33 #include <rte_launch.h>
35 #include <rte_alarm.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_atomic.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
50 #ifdef RTE_LIBRTE_IXGBE_PMD
51 #include <rte_pmd_ixgbe.h>
53 #ifdef RTE_LIBRTE_PDUMP
54 #include <rte_pdump.h>
57 #include <rte_metrics.h>
58 #ifdef RTE_LIBRTE_BITRATE
59 #include <rte_bitrate.h>
61 #ifdef RTE_LIBRTE_LATENCY_STATS
62 #include <rte_latencystats.h>
68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69 #define HUGE_FLAG (0x40000)
71 #define HUGE_FLAG MAP_HUGETLB
74 #ifndef MAP_HUGE_SHIFT
75 /* older kernels (or FreeBSD) will not have this define */
76 #define HUGE_SHIFT (26)
78 #define HUGE_SHIFT MAP_HUGE_SHIFT
81 #define EXTMEM_HEAP_NAME "extmem"
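/*
 * How the external heap is used below (see setup_extmem() and
 * mbuf_pool_create()): the heap is created with rte_malloc_heap_create(),
 * externally allocated pages are added to it with
 * rte_malloc_heap_memory_add(), and mempools are then created on the
 * socket id returned by rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME).
 */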
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
/* use the master core for the command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
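/*
 * Illustrative invocation (EAL arguments and option names are the usual
 * testpmd ones, assumed here rather than defined in this file):
 *   testpmd -l 0-3 -n 4 -- -i --cmdline-file=/path/to/commands.txt
 */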
93 * NUMA support configuration.
94 * When set, the NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports among the CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
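/* Typically chosen on the command line, e.g. --mp-alloc=anon
 * (option name assumed; parsing is handled elsewhere in testpmd). */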
 * Store the specified sockets on which the memory pool used by each port is allocated.
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
 * Store the specified sockets on which the RX rings used by the ports are allocated.
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
 * Store the specified sockets on which the TX rings used by the ports are allocated.
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded. Must be instantiated with the Ethernet addresses of the peer
 * traffic generator ports.
140 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
182 #if defined RTE_LIBRTE_PMD_SOFTNIC
185 #ifdef RTE_LIBRTE_IEEE1588
186 &ieee1588_fwd_engine,
191 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
192 uint16_t mempool_flags;
194 struct fwd_config cur_fwd_config;
195 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
196 uint32_t retry_enabled;
197 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
198 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
200 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
201 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
202 * specified on command-line. */
203 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
 * When running in a container, the process running with the 'stats-period'
 * option cannot be terminated. Set a flag to exit the stats period loop
 * after SIGINT/SIGTERM is received.
212 * Configuration of packet segments used by the "txonly" processing engine.
214 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
215 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
216 TXONLY_DEF_PACKET_LEN,
218 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
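/* Example (assumed runtime command, handled elsewhere): "set txpkts 64,64"
 * configures two 64-byte segments per TXONLY packet. */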
220 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
221 /**< Split policy for packets to TX. */
223 uint8_t txonly_multi_flow;
224 /**< Whether multiple flows are generated in TXONLY mode. */
226 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
227 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* whether the current configuration is in DCB mode; 0 means it is not */
230 uint8_t dcb_config = 0;
/* Whether DCB is in testing status */
233 uint8_t dcb_test = 0;
236 * Configurable number of RX/TX queues.
238 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
239 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
242 * Configurable number of RX/TX ring descriptors.
243 * Defaults are supplied by drivers via ethdev.
245 #define RTE_TEST_RX_DESC_DEFAULT 0
246 #define RTE_TEST_TX_DESC_DEFAULT 0
247 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
248 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
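/* A value of 0 keeps the driver default; the --rxd=N/--txd=N options
 * (assumed, parsed elsewhere) override it. */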
250 #define RTE_PMD_PARAM_UNSET -1
252 * Configurable values of RX and TX ring threshold registers.
255 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
256 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
257 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
259 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
260 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
261 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
264 * Configurable value of RX free threshold.
266 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
269 * Configurable value of RX drop enable.
271 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
274 * Configurable value of TX free threshold.
276 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
279 * Configurable value of TX RS bit threshold.
281 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
284 * Configurable value of buffered packets before sending.
286 uint16_t noisy_tx_sw_bufsz;
289 * Configurable value of packet buffer timeout.
291 uint16_t noisy_tx_sw_buf_flush_time;
294 * Configurable value for size of VNF internal memory area
295 * used for simulating noisy neighbour behaviour
297 uint64_t noisy_lkup_mem_sz;
300 * Configurable value of number of random writes done in
301 * VNF simulation memory area.
303 uint64_t noisy_lkup_num_writes;
306 * Configurable value of number of random reads done in
307 * VNF simulation memory area.
309 uint64_t noisy_lkup_num_reads;
312 * Configurable value of number of random reads/writes done in
313 * VNF simulation memory area.
315 uint64_t noisy_lkup_num_reads_writes;
318 * Receive Side Scaling (RSS) configuration.
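 * ETH_RSS_IP is a convenience mask covering the common IPv4 and IPv6
 * RSS hash types.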
320 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
323 * Port topology configuration
325 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
 * Avoid flushing all the RX streams before starting forwarding.
330 uint8_t no_flush_rx = 0; /* flush by default */
333 * Flow API isolated mode.
335 uint8_t flow_isolate_all;
 * Avoid checking the link status when starting/stopping a port.
340 uint8_t no_link_check = 0; /* check by default */
343 * Enable link status change notification
345 uint8_t lsc_interrupt = 1; /* enabled by default */
348 * Enable device removal notification.
350 uint8_t rmv_interrupt = 1; /* enabled by default */
352 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
354 /* After attach, port setup is called on event or by iterator */
355 bool setup_on_probe_event = true;
357 /* Pretty printing of ethdev events */
358 static const char * const eth_event_desc[] = {
359 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
360 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
361 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
362 [RTE_ETH_EVENT_INTR_RESET] = "reset",
363 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
364 [RTE_ETH_EVENT_IPSEC] = "IPsec",
365 [RTE_ETH_EVENT_MACSEC] = "MACsec",
366 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
367 [RTE_ETH_EVENT_NEW] = "device probed",
368 [RTE_ETH_EVENT_DESTROY] = "device released",
369 [RTE_ETH_EVENT_MAX] = NULL,
 * Display or mask ethdev events.
 * Defaults to all events except VF_MBOX.
376 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
377 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
378 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
379 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
380 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
381 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
382 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
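/* The mask can typically be adjusted at startup with the
 * --print-event/--mask-event options (assumed; parsed elsewhere). */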
 * Decide if all memory is locked for performance.
389 * NIC bypass mode configuration options.
392 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
393 /* The NIC bypass watchdog timeout. */
394 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
398 #ifdef RTE_LIBRTE_LATENCY_STATS
 * Set when latency stats are enabled on the command line
403 uint8_t latencystats_enabled;
 * Lcore ID to service latency statistics.
408 lcoreid_t latencystats_lcore_id = -1;
413 * Ethernet device configuration.
415 struct rte_eth_rxmode rx_mode = {
416 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
417 /**< Default maximum frame length. */
420 struct rte_eth_txmode tx_mode = {
421 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
424 struct rte_fdir_conf fdir_conf = {
425 .mode = RTE_FDIR_MODE_NONE,
426 .pballoc = RTE_FDIR_PBALLOC_64K,
427 .status = RTE_FDIR_REPORT_STATUS,
429 .vlan_tci_mask = 0xFFEF,
431 .src_ip = 0xFFFFFFFF,
432 .dst_ip = 0xFFFFFFFF,
435 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
436 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
438 .src_port_mask = 0xFFFF,
439 .dst_port_mask = 0xFFFF,
440 .mac_addr_byte_mask = 0xFF,
441 .tunnel_type_mask = 1,
442 .tunnel_id_mask = 0xFFFFFFFF,
447 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
449 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
450 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
452 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
453 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
455 uint16_t nb_tx_queue_stats_mappings = 0;
456 uint16_t nb_rx_queue_stats_mappings = 0;
459 * Display zero values by default for xstats
461 uint8_t xstats_hide_zero;
463 unsigned int num_sockets = 0;
464 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
466 #ifdef RTE_LIBRTE_BITRATE
467 /* Bitrate statistics */
468 struct rte_stats_bitrates *bitrate_data;
469 lcoreid_t bitrate_lcore_id;
470 uint8_t bitrate_enabled;
473 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
474 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
476 struct vxlan_encap_conf vxlan_encap_conf = {
480 .vni = "\x00\x00\x00",
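	/* 4789 is the IANA-assigned VXLAN UDP port */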
482 .udp_dst = RTE_BE16(4789),
483 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
484 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
485 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
486 "\x00\x00\x00\x00\x00\x00\x00\x01",
487 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
488 "\x00\x00\x00\x00\x00\x00\x11\x11",
492 .eth_src = "\x00\x00\x00\x00\x00\x00",
493 .eth_dst = "\xff\xff\xff\xff\xff\xff",
496 struct nvgre_encap_conf nvgre_encap_conf = {
499 .tni = "\x00\x00\x00",
500 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
501 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
502 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
503 "\x00\x00\x00\x00\x00\x00\x00\x01",
504 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
505 "\x00\x00\x00\x00\x00\x00\x11\x11",
507 .eth_src = "\x00\x00\x00\x00\x00\x00",
508 .eth_dst = "\xff\xff\xff\xff\xff\xff",
511 /* Forward function declarations */
512 static void setup_attached_port(portid_t pi);
513 static void map_port_queue_stats_mapping_registers(portid_t pi,
514 struct rte_port *port);
515 static void check_all_ports_link_status(uint32_t port_mask);
516 static int eth_event_callback(portid_t port_id,
517 enum rte_eth_event_type type,
518 void *param, void *ret_param);
519 static void dev_event_callback(const char *device_name,
520 enum rte_dev_event_type type,
524 * Check if all the ports are started.
525 * If yes, return positive value. If not, return zero.
527 static int all_ports_started(void);
529 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
530 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
 * Helper function to check whether a socket is newly discovered.
 * If it was not seen before, return a positive value; otherwise return zero.
537 new_socket_id(unsigned int socket_id)
541 for (i = 0; i < num_sockets; i++) {
542 if (socket_ids[i] == socket_id)
549 * Setup default configuration.
552 set_default_fwd_lcores_config(void)
556 unsigned int sock_num;
559 for (i = 0; i < RTE_MAX_LCORE; i++) {
560 if (!rte_lcore_is_enabled(i))
562 sock_num = rte_lcore_to_socket_id(i);
563 if (new_socket_id(sock_num)) {
564 if (num_sockets >= RTE_MAX_NUMA_NODES) {
565 rte_exit(EXIT_FAILURE,
566 "Total sockets greater than %u\n",
569 socket_ids[num_sockets++] = sock_num;
571 if (i == rte_get_master_lcore())
573 fwd_lcores_cpuids[nb_lc++] = i;
575 nb_lcores = (lcoreid_t) nb_lc;
576 nb_cfg_lcores = nb_lcores;
581 set_def_peer_eth_addrs(void)
585 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
586 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
587 peer_eth_addrs[i].addr_bytes[5] = i;
592 set_default_fwd_ports_config(void)
597 RTE_ETH_FOREACH_DEV(pt_id) {
598 fwd_ports_ids[i++] = pt_id;
600 /* Update sockets info according to the attached device */
601 int socket_id = rte_eth_dev_socket_id(pt_id);
602 if (socket_id >= 0 && new_socket_id(socket_id)) {
603 if (num_sockets >= RTE_MAX_NUMA_NODES) {
604 rte_exit(EXIT_FAILURE,
605 "Total sockets greater than %u\n",
608 socket_ids[num_sockets++] = socket_id;
612 nb_cfg_ports = nb_ports;
613 nb_fwd_ports = nb_ports;
617 set_def_fwd_config(void)
619 set_default_fwd_lcores_config();
620 set_def_peer_eth_addrs();
621 set_default_fwd_ports_config();
624 /* extremely pessimistic estimation of memory required to create a mempool */
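/*
 * Roughly, matching the computation below:
 *   total = ALIGN(hdr_mem + ceil(nb_mbufs / (pgsz / obj_sz)) * pgsz, pgsz)
 * where hdr_mem is a flat 128MB allowance for mempool metadata.
 */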
626 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
628 unsigned int n_pages, mbuf_per_pg, leftover;
629 uint64_t total_mem, mbuf_mem, obj_sz;
631 /* there is no good way to predict how much space the mempool will
632 * occupy because it will allocate chunks on the fly, and some of those
633 * will come from default DPDK memory while some will come from our
634 * external memory, so just assume 128MB will be enough for everyone.
636 uint64_t hdr_mem = 128 << 20;
638 /* account for possible non-contiguousness */
639 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
641 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
645 mbuf_per_pg = pgsz / obj_sz;
646 leftover = (nb_mbufs % mbuf_per_pg) > 0;
647 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
649 mbuf_mem = n_pages * pgsz;
651 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
653 if (total_mem > SIZE_MAX) {
654 TESTPMD_LOG(ERR, "Memory size too big\n");
657 *out = (size_t)total_mem;
663 pagesz_flags(uint64_t page_sz)
/* as per the mmap() manpage, page sizes are encoded as the log2
 * of the page size, shifted by MAP_HUGE_SHIFT
668 int log2 = rte_log2_u64(page_sz);
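/* e.g. for 2MB pages log2 is 21, so the result is 21 << 26,
 * which matches the kernel's MAP_HUGE_2MB value */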
670 return (log2 << HUGE_SHIFT);
674 alloc_mem(size_t memsz, size_t pgsz, bool huge)
679 /* allocate anonymous hugepages */
680 flags = MAP_ANONYMOUS | MAP_PRIVATE;
682 flags |= HUGE_FLAG | pagesz_flags(pgsz);
684 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
685 if (addr == MAP_FAILED)
691 struct extmem_param {
695 rte_iova_t *iova_table;
696 unsigned int iova_table_len;
700 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
703 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
704 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
705 unsigned int cur_page, n_pages, pgsz_idx;
706 size_t mem_sz, cur_pgsz;
707 rte_iova_t *iovas = NULL;
711 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
712 /* skip anything that is too big */
713 if (pgsizes[pgsz_idx] > SIZE_MAX)
716 cur_pgsz = pgsizes[pgsz_idx];
718 /* if we were told not to allocate hugepages, override */
720 cur_pgsz = sysconf(_SC_PAGESIZE);
722 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
724 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
728 /* allocate our memory */
729 addr = alloc_mem(mem_sz, cur_pgsz, huge);
731 /* if we couldn't allocate memory with a specified page size,
732 * that doesn't mean we can't do it with other page sizes, so
738 /* store IOVA addresses for every page in this memory area */
739 n_pages = mem_sz / cur_pgsz;
741 iovas = malloc(sizeof(*iovas) * n_pages);
744 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
747 /* lock memory if it's not huge pages */
751 /* populate IOVA addresses */
752 for (cur_page = 0; cur_page < n_pages; cur_page++) {
757 offset = cur_pgsz * cur_page;
758 cur = RTE_PTR_ADD(addr, offset);
760 /* touch the page before getting its IOVA */
761 *(volatile char *)cur = 0;
763 iova = rte_mem_virt2iova(cur);
765 iovas[cur_page] = iova;
770 /* if we couldn't allocate anything */
776 param->pgsz = cur_pgsz;
777 param->iova_table = iovas;
778 param->iova_table_len = n_pages;
785 munmap(addr, mem_sz);
791 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
793 struct extmem_param param;
memset(&param, 0, sizeof(param));
798 /* check if our heap exists */
799 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
801 /* create our heap */
802 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
804 TESTPMD_LOG(ERR, "Cannot create heap\n");
ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
811 TESTPMD_LOG(ERR, "Cannot create memory area\n");
815 /* we now have a valid memory area, so add it to heap */
816 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
817 param.addr, param.len, param.iova_table,
818 param.iova_table_len, param.pgsz);
820 /* when using VFIO, memory is automatically mapped for DMA by EAL */
822 /* not needed any more */
823 free(param.iova_table);
826 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
827 munmap(param.addr, param.len);
833 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
839 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
840 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
845 RTE_ETH_FOREACH_DEV(pid) {
846 struct rte_eth_dev *dev =
847 &rte_eth_devices[pid];
849 ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
853 "unable to DMA unmap addr 0x%p "
855 memhdr->addr, dev->data->name);
858 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
861 "unable to un-register addr 0x%p\n", memhdr->addr);
866 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
867 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
870 size_t page_size = sysconf(_SC_PAGESIZE);
873 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
877 "unable to register addr 0x%p\n", memhdr->addr);
880 RTE_ETH_FOREACH_DEV(pid) {
881 struct rte_eth_dev *dev =
882 &rte_eth_devices[pid];
884 ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
888 "unable to DMA map addr 0x%p "
890 memhdr->addr, dev->data->name);
896 * Configuration initialisation done once at init time.
898 static struct rte_mempool *
899 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
900 unsigned int socket_id)
902 char pool_name[RTE_MEMPOOL_NAMESIZE];
903 struct rte_mempool *rte_mp = NULL;
906 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
907 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
910 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
911 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
913 switch (mp_alloc_type) {
914 case MP_ALLOC_NATIVE:
916 /* wrapper to rte_mempool_create() */
917 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
918 rte_mbuf_best_mempool_ops());
919 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
920 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
925 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
926 mb_size, (unsigned int) mb_mempool_cache,
927 sizeof(struct rte_pktmbuf_pool_private),
928 socket_id, mempool_flags);
932 if (rte_mempool_populate_anon(rte_mp) == 0) {
933 rte_mempool_free(rte_mp);
937 rte_pktmbuf_pool_init(rte_mp, NULL);
938 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
939 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
943 case MP_ALLOC_XMEM_HUGE:
946 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
948 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
949 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
952 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
954 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
956 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
957 rte_mbuf_best_mempool_ops());
958 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
959 mb_mempool_cache, 0, mbuf_seg_size,
965 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
970 if (rte_mp == NULL) {
971 rte_exit(EXIT_FAILURE,
972 "Creation of mbuf pool for socket %u failed: %s\n",
973 socket_id, rte_strerror(rte_errno));
974 } else if (verbose_level > 0) {
975 rte_mempool_dump(stdout, rte_mp);
 * Check whether the given socket id is valid in NUMA mode;
 * if valid, return 0, else return -1
985 check_socket_id(const unsigned int socket_id)
987 static int warning_once = 0;
989 if (new_socket_id(socket_id)) {
990 if (!warning_once && numa_support)
991 printf("Warning: NUMA should be configured manually by"
992 " using --port-numa-config and"
993 " --ring-numa-config parameters along with"
1002 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
1007 get_allowed_max_nb_rxq(portid_t *pid)
1009 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
1011 struct rte_eth_dev_info dev_info;
1013 RTE_ETH_FOREACH_DEV(pi) {
1014 rte_eth_dev_info_get(pi, &dev_info);
1015 if (dev_info.max_rx_queues < allowed_max_rxq) {
1016 allowed_max_rxq = dev_info.max_rx_queues;
1020 return allowed_max_rxq;
 * Check whether the input rxq is valid.
 * The input rxq is valid if it does not exceed the maximum
 * number of RX queues of any port.
 * If valid, return 0, else return -1
1030 check_nb_rxq(queueid_t rxq)
1032 queueid_t allowed_max_rxq;
1035 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1036 if (rxq > allowed_max_rxq) {
1037 printf("Fail: input rxq (%u) can't be greater "
1038 "than max_rx_queues (%u) of port %u\n",
1048 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
1053 get_allowed_max_nb_txq(portid_t *pid)
1055 queueid_t allowed_max_txq = MAX_QUEUE_ID;
1057 struct rte_eth_dev_info dev_info;
1059 RTE_ETH_FOREACH_DEV(pi) {
1060 rte_eth_dev_info_get(pi, &dev_info);
1061 if (dev_info.max_tx_queues < allowed_max_txq) {
1062 allowed_max_txq = dev_info.max_tx_queues;
1066 return allowed_max_txq;
 * Check whether the input txq is valid.
 * The input txq is valid if it does not exceed the maximum
 * number of TX queues of any port.
 * If valid, return 0, else return -1
1076 check_nb_txq(queueid_t txq)
1078 queueid_t allowed_max_txq;
1081 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1082 if (txq > allowed_max_txq) {
1083 printf("Fail: input txq (%u) can't be greater "
1084 "than max_tx_queues (%u) of port %u\n",
1097 struct rte_port *port;
1098 struct rte_mempool *mbp;
1099 unsigned int nb_mbuf_per_pool;
1101 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1102 struct rte_gro_param gro_param;
memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
1110 /* Configuration of logical cores. */
1111 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1112 sizeof(struct fwd_lcore *) * nb_lcores,
1113 RTE_CACHE_LINE_SIZE);
1114 if (fwd_lcores == NULL) {
1115 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1116 "failed\n", nb_lcores);
1118 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1119 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1120 sizeof(struct fwd_lcore),
1121 RTE_CACHE_LINE_SIZE);
1122 if (fwd_lcores[lc_id] == NULL) {
1123 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1126 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1129 RTE_ETH_FOREACH_DEV(pid) {
1131 /* Apply default TxRx configuration for all ports */
1132 port->dev_conf.txmode = tx_mode;
1133 port->dev_conf.rxmode = rx_mode;
1134 rte_eth_dev_info_get(pid, &port->dev_info);
1136 if (!(port->dev_info.tx_offload_capa &
1137 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1138 port->dev_conf.txmode.offloads &=
1139 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1140 if (!(port->dev_info.tx_offload_capa &
1141 DEV_TX_OFFLOAD_MATCH_METADATA))
1142 port->dev_conf.txmode.offloads &=
1143 ~DEV_TX_OFFLOAD_MATCH_METADATA;
1145 if (port_numa[pid] != NUMA_NO_CONFIG)
1146 port_per_socket[port_numa[pid]]++;
1148 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1151 * if socket_id is invalid,
1152 * set to the first available socket.
1154 if (check_socket_id(socket_id) < 0)
1155 socket_id = socket_ids[0];
1156 port_per_socket[socket_id]++;
1160 /* Apply Rx offloads configuration */
1161 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1162 port->rx_conf[k].offloads =
1163 port->dev_conf.rxmode.offloads;
1164 /* Apply Tx offloads configuration */
1165 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1166 port->tx_conf[k].offloads =
1167 port->dev_conf.txmode.offloads;
1169 /* set flag to initialize port/queue */
1170 port->need_reconfig = 1;
1171 port->need_reconfig_queues = 1;
1172 port->tx_metadata = 0;
/* Check the maximum number of segments per MTU and update
 * the mbuf data size accordingly.
1177 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1178 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1179 data_size = rx_mode.max_rx_pkt_len /
1180 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1182 if ((data_size + RTE_PKTMBUF_HEADROOM) >
1184 mbuf_data_size = data_size +
1185 RTE_PKTMBUF_HEADROOM;
1192 TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
 * Create pools of mbufs.
 * If NUMA support is disabled, create a single pool of mbufs in
 * socket 0 memory by default.
 * Otherwise, create a pool of mbufs in the memory of each discovered socket.
 *
 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
 * nb_txd can be re-configured at run time.
1204 if (param_total_num_mbufs)
1205 nb_mbuf_per_pool = param_total_num_mbufs;
1207 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1208 (nb_lcores * mb_mempool_cache) +
1209 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1210 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
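/*
 * Illustrative sizing, assuming the usual testpmd.h defaults
 * (RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048, MAX_PKT_BURST = 512,
 * mb_mempool_cache = 250): with 4 lcores this gives
 * (2048 + 4 * 250 + 2048 + 512) * RTE_MAX_ETHPORTS = 5608 * RTE_MAX_ETHPORTS
 * mbufs per pool.
 */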
1216 for (i = 0; i < num_sockets; i++)
1217 mempools[i] = mbuf_pool_create(mbuf_data_size,
1221 if (socket_num == UMA_NO_CONFIG)
1222 mempools[0] = mbuf_pool_create(mbuf_data_size,
1223 nb_mbuf_per_pool, 0);
1225 mempools[socket_num] = mbuf_pool_create
1233 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1234 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
 * Record which mbuf pool each logical core should use, if needed.
1238 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1239 mbp = mbuf_pool_find(
1240 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1243 mbp = mbuf_pool_find(0);
1244 fwd_lcores[lc_id]->mbp = mbp;
1245 /* initialize GSO context */
1246 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1247 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1248 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1249 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1251 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1254 /* Configuration of packet forwarding streams. */
1255 if (init_fwd_streams() < 0)
1256 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1260 /* create a gro context for each lcore */
1261 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1262 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1263 gro_param.max_item_per_flow = MAX_PKT_BURST;
1264 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1265 gro_param.socket_id = rte_lcore_to_socket_id(
1266 fwd_lcores_cpuids[lc_id]);
1267 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1268 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1269 rte_exit(EXIT_FAILURE,
1270 "rte_gro_ctx_create() failed\n");
1274 #if defined RTE_LIBRTE_PMD_SOFTNIC
1275 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1276 RTE_ETH_FOREACH_DEV(pid) {
1278 const char *driver = port->dev_info.driver_name;
1280 if (strcmp(driver, "net_softnic") == 0)
1281 port->softport.fwd_lcore_arg = fwd_lcores;
1290 reconfig(portid_t new_port_id, unsigned socket_id)
1292 struct rte_port *port;
1294 /* Reconfiguration of Ethernet ports. */
1295 port = &ports[new_port_id];
1296 rte_eth_dev_info_get(new_port_id, &port->dev_info);
1298 /* set flag to initialize port/queue */
1299 port->need_reconfig = 1;
1300 port->need_reconfig_queues = 1;
1301 port->socket_id = socket_id;
1308 init_fwd_streams(void)
1311 struct rte_port *port;
1312 streamid_t sm_id, nb_fwd_streams_new;
/* set the socket id according to whether NUMA is used */
1316 RTE_ETH_FOREACH_DEV(pid) {
1318 if (nb_rxq > port->dev_info.max_rx_queues) {
1319 printf("Fail: nb_rxq(%d) is greater than "
1320 "max_rx_queues(%d)\n", nb_rxq,
1321 port->dev_info.max_rx_queues);
1324 if (nb_txq > port->dev_info.max_tx_queues) {
1325 printf("Fail: nb_txq(%d) is greater than "
1326 "max_tx_queues(%d)\n", nb_txq,
1327 port->dev_info.max_tx_queues);
1331 if (port_numa[pid] != NUMA_NO_CONFIG)
1332 port->socket_id = port_numa[pid];
1334 port->socket_id = rte_eth_dev_socket_id(pid);
1337 * if socket_id is invalid,
1338 * set to the first available socket.
1340 if (check_socket_id(port->socket_id) < 0)
1341 port->socket_id = socket_ids[0];
1345 if (socket_num == UMA_NO_CONFIG)
1346 port->socket_id = 0;
1348 port->socket_id = socket_num;
1352 q = RTE_MAX(nb_rxq, nb_txq);
1354 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1357 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1358 if (nb_fwd_streams_new == nb_fwd_streams)
1361 if (fwd_streams != NULL) {
1362 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1363 if (fwd_streams[sm_id] == NULL)
1365 rte_free(fwd_streams[sm_id]);
1366 fwd_streams[sm_id] = NULL;
1368 rte_free(fwd_streams);
1373 nb_fwd_streams = nb_fwd_streams_new;
1374 if (nb_fwd_streams) {
1375 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1376 sizeof(struct fwd_stream *) * nb_fwd_streams,
1377 RTE_CACHE_LINE_SIZE);
1378 if (fwd_streams == NULL)
1379 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1380 " (struct fwd_stream *)) failed\n",
1383 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1384 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1385 " struct fwd_stream", sizeof(struct fwd_stream),
1386 RTE_CACHE_LINE_SIZE);
1387 if (fwd_streams[sm_id] == NULL)
1388 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1389 "(struct fwd_stream) failed\n");
1396 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1398 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1400 unsigned int total_burst;
1401 unsigned int nb_burst;
1402 unsigned int burst_stats[3];
1403 uint16_t pktnb_stats[3];
1405 int burst_percent[3];
1408 * First compute the total number of packet bursts and the
1409 * two highest numbers of bursts of the same number of packets.
1412 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1413 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1414 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1415 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1418 total_burst += nb_burst;
1419 if (nb_burst > burst_stats[0]) {
1420 burst_stats[1] = burst_stats[0];
1421 pktnb_stats[1] = pktnb_stats[0];
1422 burst_stats[0] = nb_burst;
1423 pktnb_stats[0] = nb_pkt;
1424 } else if (nb_burst > burst_stats[1]) {
1425 burst_stats[1] = nb_burst;
1426 pktnb_stats[1] = nb_pkt;
1429 if (total_burst == 0)
1431 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1432 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1433 burst_percent[0], (int) pktnb_stats[0]);
1434 if (burst_stats[0] == total_burst) {
1438 if (burst_stats[0] + burst_stats[1] == total_burst) {
1439 printf(" + %d%% of %d pkts]\n",
1440 100 - burst_percent[0], pktnb_stats[1]);
1443 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1444 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1445 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1446 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1449 printf(" + %d%% of %d pkts + %d%% of others]\n",
1450 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1452 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
1455 fwd_stream_stats_display(streamid_t stream_id)
1457 struct fwd_stream *fs;
1458 static const char *fwd_top_stats_border = "-------";
1460 fs = fwd_streams[stream_id];
1461 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1462 (fs->fwd_dropped == 0))
1464 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1465 "TX Port=%2d/Queue=%2d %s\n",
1466 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1467 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1468 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1469 " TX-dropped: %-14"PRIu64,
1470 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1472 /* if checksum mode */
1473 if (cur_fwd_eng == &csum_fwd_engine) {
printf(" RX- bad IP checksum: %-14"PRIu64
" RX- bad L4 checksum: %-14"PRIu64
" RX- bad outer L4 checksum: %-14"PRIu64"\n",
1477 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1478 fs->rx_bad_outer_l4_csum);
1483 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1484 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1485 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1490 fwd_stats_display(void)
1492 static const char *fwd_stats_border = "----------------------";
1493 static const char *acc_stats_border = "+++++++++++++++";
1495 struct fwd_stream *rx_stream;
1496 struct fwd_stream *tx_stream;
1497 uint64_t tx_dropped;
1498 uint64_t rx_bad_ip_csum;
1499 uint64_t rx_bad_l4_csum;
1500 uint64_t rx_bad_outer_l4_csum;
1501 } ports_stats[RTE_MAX_ETHPORTS];
1502 uint64_t total_rx_dropped = 0;
1503 uint64_t total_tx_dropped = 0;
1504 uint64_t total_rx_nombuf = 0;
1505 struct rte_eth_stats stats;
1506 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1507 uint64_t fwd_cycles = 0;
1509 uint64_t total_recv = 0;
1510 uint64_t total_xmit = 0;
1511 struct rte_port *port;
1516 memset(ports_stats, 0, sizeof(ports_stats));
1518 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1519 struct fwd_stream *fs = fwd_streams[sm_id];
1521 if (cur_fwd_config.nb_fwd_streams >
1522 cur_fwd_config.nb_fwd_ports) {
1523 fwd_stream_stats_display(sm_id);
1525 ports_stats[fs->tx_port].tx_stream = fs;
1526 ports_stats[fs->rx_port].rx_stream = fs;
1529 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1531 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1532 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1533 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1534 fs->rx_bad_outer_l4_csum;
1536 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1537 fwd_cycles += fs->core_cycles;
1540 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1543 pt_id = fwd_ports_ids[i];
1544 port = &ports[pt_id];
1546 rte_eth_stats_get(pt_id, &stats);
1547 stats.ipackets -= port->stats.ipackets;
1548 stats.opackets -= port->stats.opackets;
1549 stats.ibytes -= port->stats.ibytes;
1550 stats.obytes -= port->stats.obytes;
1551 stats.imissed -= port->stats.imissed;
1552 stats.oerrors -= port->stats.oerrors;
1553 stats.rx_nombuf -= port->stats.rx_nombuf;
1555 total_recv += stats.ipackets;
1556 total_xmit += stats.opackets;
1557 total_rx_dropped += stats.imissed;
1558 total_tx_dropped += ports_stats[pt_id].tx_dropped;
1559 total_tx_dropped += stats.oerrors;
1560 total_rx_nombuf += stats.rx_nombuf;
1562 printf("\n %s Forward statistics for port %-2d %s\n",
1563 fwd_stats_border, pt_id, fwd_stats_border);
1565 if (!port->rx_queue_stats_mapping_enabled &&
1566 !port->tx_queue_stats_mapping_enabled) {
1567 printf(" RX-packets: %-14"PRIu64
1568 " RX-dropped: %-14"PRIu64
1569 "RX-total: %-"PRIu64"\n",
1570 stats.ipackets, stats.imissed,
1571 stats.ipackets + stats.imissed);
1573 if (cur_fwd_eng == &csum_fwd_engine)
1574 printf(" Bad-ipcsum: %-14"PRIu64
1575 " Bad-l4csum: %-14"PRIu64
1576 "Bad-outer-l4csum: %-14"PRIu64"\n",
1577 ports_stats[pt_id].rx_bad_ip_csum,
1578 ports_stats[pt_id].rx_bad_l4_csum,
1579 ports_stats[pt_id].rx_bad_outer_l4_csum);
1580 if (stats.ierrors + stats.rx_nombuf > 0) {
1581 printf(" RX-error: %-"PRIu64"\n",
1583 printf(" RX-nombufs: %-14"PRIu64"\n",
1587 printf(" TX-packets: %-14"PRIu64
1588 " TX-dropped: %-14"PRIu64
1589 "TX-total: %-"PRIu64"\n",
1590 stats.opackets, ports_stats[pt_id].tx_dropped,
1591 stats.opackets + ports_stats[pt_id].tx_dropped);
1593 printf(" RX-packets: %14"PRIu64
1594 " RX-dropped:%14"PRIu64
1595 " RX-total:%14"PRIu64"\n",
1596 stats.ipackets, stats.imissed,
1597 stats.ipackets + stats.imissed);
1599 if (cur_fwd_eng == &csum_fwd_engine)
1600 printf(" Bad-ipcsum:%14"PRIu64
1601 " Bad-l4csum:%14"PRIu64
1602 " Bad-outer-l4csum: %-14"PRIu64"\n",
1603 ports_stats[pt_id].rx_bad_ip_csum,
1604 ports_stats[pt_id].rx_bad_l4_csum,
1605 ports_stats[pt_id].rx_bad_outer_l4_csum);
1606 if ((stats.ierrors + stats.rx_nombuf) > 0) {
1607 printf(" RX-error:%"PRIu64"\n", stats.ierrors);
1608 printf(" RX-nombufs: %14"PRIu64"\n",
1612 printf(" TX-packets: %14"PRIu64
1613 " TX-dropped:%14"PRIu64
1614 " TX-total:%14"PRIu64"\n",
1615 stats.opackets, ports_stats[pt_id].tx_dropped,
1616 stats.opackets + ports_stats[pt_id].tx_dropped);
1619 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1620 if (ports_stats[pt_id].rx_stream)
1621 pkt_burst_stats_display("RX",
1622 &ports_stats[pt_id].rx_stream->rx_burst_stats);
1623 if (ports_stats[pt_id].tx_stream)
1624 pkt_burst_stats_display("TX",
1625 &ports_stats[pt_id].tx_stream->tx_burst_stats);
1628 if (port->rx_queue_stats_mapping_enabled) {
1630 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1631 printf(" Stats reg %2d RX-packets:%14"PRIu64
1632 " RX-errors:%14"PRIu64
1633 " RX-bytes:%14"PRIu64"\n",
1634 j, stats.q_ipackets[j],
1635 stats.q_errors[j], stats.q_ibytes[j]);
1639 if (port->tx_queue_stats_mapping_enabled) {
1640 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1641 printf(" Stats reg %2d TX-packets:%14"PRIu64
1644 j, stats.q_opackets[j],
1649 printf(" %s--------------------------------%s\n",
1650 fwd_stats_border, fwd_stats_border);
1653 printf("\n %s Accumulated forward statistics for all ports"
1655 acc_stats_border, acc_stats_border);
1656 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1658 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1660 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1661 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1662 if (total_rx_nombuf > 0)
1663 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1664 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1666 acc_stats_border, acc_stats_border);
1667 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1669 printf("\n CPU cycles/packet=%u (total cycles="
1670 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1671 (unsigned int)(fwd_cycles / total_recv),
1672 fwd_cycles, total_recv);
1677 fwd_stats_reset(void)
1683 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1684 pt_id = fwd_ports_ids[i];
1685 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
1687 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1688 struct fwd_stream *fs = fwd_streams[sm_id];
1692 fs->fwd_dropped = 0;
1693 fs->rx_bad_ip_csum = 0;
1694 fs->rx_bad_l4_csum = 0;
1695 fs->rx_bad_outer_l4_csum = 0;
1697 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1698 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
1699 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
1701 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1702 fs->core_cycles = 0;
1708 flush_fwd_rx_queues(void)
1710 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1717 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1718 uint64_t timer_period;
1720 /* convert to number of cycles */
1721 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1723 for (j = 0; j < 2; j++) {
1724 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1725 for (rxq = 0; rxq < nb_rxq; rxq++) {
1726 port_id = fwd_ports_ids[rxp];
 * testpmd can get stuck in the do-while loop below
 * if rte_eth_rx_burst() always returns a nonzero
 * number of packets, so a timer is added to exit
 * this loop after a 1-second timeout.
1733 prev_tsc = rte_rdtsc();
1735 nb_rx = rte_eth_rx_burst(port_id, rxq,
1736 pkts_burst, MAX_PKT_BURST);
1737 for (i = 0; i < nb_rx; i++)
1738 rte_pktmbuf_free(pkts_burst[i]);
1740 cur_tsc = rte_rdtsc();
1741 diff_tsc = cur_tsc - prev_tsc;
1742 timer_tsc += diff_tsc;
1743 } while ((nb_rx > 0) &&
1744 (timer_tsc < timer_period));
1748 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1753 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1755 struct fwd_stream **fsm;
1758 #ifdef RTE_LIBRTE_BITRATE
1759 uint64_t tics_per_1sec;
1760 uint64_t tics_datum;
1761 uint64_t tics_current;
1762 uint16_t i, cnt_ports;
1764 cnt_ports = nb_ports;
1765 tics_datum = rte_rdtsc();
1766 tics_per_1sec = rte_get_timer_hz();
1768 fsm = &fwd_streams[fc->stream_idx];
1769 nb_fs = fc->stream_nb;
1771 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1772 (*pkt_fwd)(fsm[sm_id]);
1773 #ifdef RTE_LIBRTE_BITRATE
1774 if (bitrate_enabled != 0 &&
1775 bitrate_lcore_id == rte_lcore_id()) {
1776 tics_current = rte_rdtsc();
1777 if (tics_current - tics_datum >= tics_per_1sec) {
1778 /* Periodic bitrate calculation */
1779 for (i = 0; i < cnt_ports; i++)
1780 rte_stats_bitrate_calc(bitrate_data,
1782 tics_datum = tics_current;
1786 #ifdef RTE_LIBRTE_LATENCY_STATS
1787 if (latencystats_enabled != 0 &&
1788 latencystats_lcore_id == rte_lcore_id())
1789 rte_latencystats_update();
1792 } while (! fc->stopped);
1796 start_pkt_forward_on_core(void *fwd_arg)
1798 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1799 cur_fwd_config.fwd_eng->packet_fwd);
1804 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1805 * Used to start communication flows in network loopback test configurations.
1808 run_one_txonly_burst_on_core(void *fwd_arg)
1810 struct fwd_lcore *fwd_lc;
1811 struct fwd_lcore tmp_lcore;
1813 fwd_lc = (struct fwd_lcore *) fwd_arg;
1814 tmp_lcore = *fwd_lc;
1815 tmp_lcore.stopped = 1;
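/* With stopped preset to 1, the do-while loop in run_pkt_fwd_on_lcore()
 * executes exactly once, so only a single burst is sent. */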
1816 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1821 * Launch packet forwarding:
1822 * - Setup per-port forwarding context.
1823 * - launch logical cores with their forwarding configuration.
1826 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1828 port_fwd_begin_t port_fwd_begin;
1833 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1834 if (port_fwd_begin != NULL) {
1835 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1836 (*port_fwd_begin)(fwd_ports_ids[i]);
1838 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1839 lc_id = fwd_lcores_cpuids[i];
1840 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1841 fwd_lcores[i]->stopped = 0;
1842 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1843 fwd_lcores[i], lc_id);
1845 printf("launch lcore %u failed - diag=%d\n",
1852 * Launch packet forwarding configuration.
1855 start_packet_forwarding(int with_tx_first)
1857 port_fwd_begin_t port_fwd_begin;
1858 port_fwd_end_t port_fwd_end;
1859 struct rte_port *port;
1863 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1864 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1866 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1867 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1869 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1870 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1871 (!nb_rxq || !nb_txq))
1872 rte_exit(EXIT_FAILURE,
1873 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1874 cur_fwd_eng->fwd_mode_name);
1876 if (all_ports_started() == 0) {
1877 printf("Not all ports were started\n");
1880 if (test_done == 0) {
1881 printf("Packet forwarding already started\n");
1887 for (i = 0; i < nb_fwd_ports; i++) {
1888 pt_id = fwd_ports_ids[i];
1889 port = &ports[pt_id];
1890 if (!port->dcb_flag) {
1891 printf("In DCB mode, all forwarding ports must "
1892 "be configured in this mode.\n");
1896 if (nb_fwd_lcores == 1) {
printf("In DCB mode, the number of forwarding cores "
"should be larger than 1.\n");
1907 flush_fwd_rx_queues();
1909 pkt_fwd_config_display(&cur_fwd_config);
1910 rxtx_config_display();
1913 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1914 pt_id = fwd_ports_ids[i];
1915 port = &ports[pt_id];
1916 map_port_queue_stats_mapping_registers(pt_id, port);
1918 if (with_tx_first) {
1919 port_fwd_begin = tx_only_engine.port_fwd_begin;
1920 if (port_fwd_begin != NULL) {
1921 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1922 (*port_fwd_begin)(fwd_ports_ids[i]);
1924 while (with_tx_first--) {
1925 launch_packet_forwarding(
1926 run_one_txonly_burst_on_core);
1927 rte_eal_mp_wait_lcore();
1929 port_fwd_end = tx_only_engine.port_fwd_end;
1930 if (port_fwd_end != NULL) {
1931 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1932 (*port_fwd_end)(fwd_ports_ids[i]);
1935 launch_packet_forwarding(start_pkt_forward_on_core);
1939 stop_packet_forwarding(void)
1941 port_fwd_end_t port_fwd_end;
1947 printf("Packet forwarding not started\n");
1950 printf("Telling cores to stop...");
1951 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1952 fwd_lcores[lc_id]->stopped = 1;
1953 printf("\nWaiting for lcores to finish...\n");
1954 rte_eal_mp_wait_lcore();
1955 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1956 if (port_fwd_end != NULL) {
1957 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1958 pt_id = fwd_ports_ids[i];
1959 (*port_fwd_end)(pt_id);
1963 fwd_stats_display();
1965 printf("\nDone.\n");
1970 dev_set_link_up(portid_t pid)
1972 if (rte_eth_dev_set_link_up(pid) < 0)
printf("\nFailed to set link up.\n");
1977 dev_set_link_down(portid_t pid)
1979 if (rte_eth_dev_set_link_down(pid) < 0)
printf("\nFailed to set link down.\n");
1984 all_ports_started(void)
1987 struct rte_port *port;
1989 RTE_ETH_FOREACH_DEV(pi) {
1991 /* Check if there is a port which is not started */
1992 if ((port->port_status != RTE_PORT_STARTED) &&
1993 (port->slave_flag == 0))
/* All ports have been started */
2002 port_is_stopped(portid_t port_id)
2004 struct rte_port *port = &ports[port_id];
2006 if ((port->port_status != RTE_PORT_STOPPED) &&
2007 (port->slave_flag == 0))
2013 all_ports_stopped(void)
2017 RTE_ETH_FOREACH_DEV(pi) {
2018 if (!port_is_stopped(pi))
2026 port_is_started(portid_t port_id)
2028 if (port_id_is_invalid(port_id, ENABLED_WARN))
2031 if (ports[port_id].port_status != RTE_PORT_STARTED)
2038 start_port(portid_t pid)
2040 int diag, need_check_link_status = -1;
2043 struct rte_port *port;
2044 struct rte_ether_addr mac_addr;
2046 if (port_id_is_invalid(pid, ENABLED_WARN))
2051 RTE_ETH_FOREACH_DEV(pi) {
2052 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2055 need_check_link_status = 0;
2057 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2058 RTE_PORT_HANDLING) == 0) {
printf("Port %d is not stopped\n", pi);
2063 if (port->need_reconfig > 0) {
2064 port->need_reconfig = 0;
2066 if (flow_isolate_all) {
2067 int ret = port_flow_isolate(pi, 1);
2069 printf("Failed to apply isolated"
2070 " mode on port %d\n", pi);
2074 configure_rxtx_dump_callbacks(0);
2075 printf("Configuring Port %d (socket %u)\n", pi,
2077 /* configure port */
2078 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
2081 if (rte_atomic16_cmpset(&(port->port_status),
2082 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2083 printf("Port %d can not be set back "
2084 "to stopped\n", pi);
printf("Failed to configure port %d\n", pi);
2086 /* try to reconfigure port next time */
2087 port->need_reconfig = 1;
2091 if (port->need_reconfig_queues > 0) {
2092 port->need_reconfig_queues = 0;
2093 /* setup tx queues */
2094 for (qi = 0; qi < nb_txq; qi++) {
2095 if ((numa_support) &&
2096 (txring_numa[pi] != NUMA_NO_CONFIG))
2097 diag = rte_eth_tx_queue_setup(pi, qi,
2098 port->nb_tx_desc[qi],
2100 &(port->tx_conf[qi]));
2102 diag = rte_eth_tx_queue_setup(pi, qi,
2103 port->nb_tx_desc[qi],
2105 &(port->tx_conf[qi]));
/* Failed to set up a TX queue, return */
2111 if (rte_atomic16_cmpset(&(port->port_status),
2113 RTE_PORT_STOPPED) == 0)
2114 printf("Port %d can not be set back "
2115 "to stopped\n", pi);
printf("Failed to configure port %d tx queues\n",
2118 /* try to reconfigure queues next time */
2119 port->need_reconfig_queues = 1;
2122 for (qi = 0; qi < nb_rxq; qi++) {
2123 /* setup rx queues */
2124 if ((numa_support) &&
2125 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2126 struct rte_mempool * mp =
2127 mbuf_pool_find(rxring_numa[pi]);
printf("Failed to set up RX queue: "
"no mempool allocated"
" on socket %d\n",
2136 diag = rte_eth_rx_queue_setup(pi, qi,
2137 port->nb_rx_desc[qi],
2139 &(port->rx_conf[qi]),
2142 struct rte_mempool *mp =
2143 mbuf_pool_find(port->socket_id);
printf("Failed to set up RX queue: "
"no mempool allocated"
" on socket %d\n",
2151 diag = rte_eth_rx_queue_setup(pi, qi,
2152 port->nb_rx_desc[qi],
2154 &(port->rx_conf[qi]),
/* Failed to set up an RX queue, return */
2161 if (rte_atomic16_cmpset(&(port->port_status),
2163 RTE_PORT_STOPPED) == 0)
2164 printf("Port %d can not be set back "
2165 "to stopped\n", pi);
printf("Failed to configure port %d rx queues\n",
2168 /* try to reconfigure queues next time */
2169 port->need_reconfig_queues = 1;
2173 configure_rxtx_dump_callbacks(verbose_level);
2175 if (rte_eth_dev_start(pi) < 0) {
printf("Failed to start port %d\n", pi);
/* Failed to start the port, revert its status and return */
2179 if (rte_atomic16_cmpset(&(port->port_status),
2180 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2181 printf("Port %d can not be set back to "
2186 if (rte_atomic16_cmpset(&(port->port_status),
2187 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2188 printf("Port %d can not be set into started\n", pi);
2190 rte_eth_macaddr_get(pi, &mac_addr);
2191 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2192 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2193 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2194 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
/* at least one port started, need to check link status */
2197 need_check_link_status = 1;
2200 if (need_check_link_status == 1 && !no_link_check)
2201 check_all_ports_link_status(RTE_PORT_ALL);
2202 else if (need_check_link_status == 0)
2203 printf("Please stop the ports first\n");
2210 stop_port(portid_t pid)
2213 struct rte_port *port;
2214 int need_check_link_status = 0;
2221 if (port_id_is_invalid(pid, ENABLED_WARN))
2224 printf("Stopping ports...\n");
2226 RTE_ETH_FOREACH_DEV(pi) {
2227 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2230 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2231 printf("Please remove port %d from forwarding configuration.\n", pi);
2235 if (port_is_bonding_slave(pi)) {
2236 printf("Please remove port %d from bonded device.\n", pi);
2241 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2242 RTE_PORT_HANDLING) == 0)
2245 rte_eth_dev_stop(pi);
2247 if (rte_atomic16_cmpset(&(port->port_status),
2248 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2249 printf("Port %d can not be set into stopped\n", pi);
2250 need_check_link_status = 1;
2252 if (need_check_link_status && !no_link_check)
2253 check_all_ports_link_status(RTE_PORT_ALL);
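/* Compact the array in place, keeping only valid port ids, and update
 * *total accordingly. */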
2259 remove_invalid_ports_in(portid_t *array, portid_t *total)
2262 portid_t new_total = 0;
2264 for (i = 0; i < *total; i++)
2265 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2266 array[new_total] = array[i];
2273 remove_invalid_ports(void)
2275 remove_invalid_ports_in(ports_ids, &nb_ports);
2276 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2277 nb_cfg_ports = nb_fwd_ports;
2281 close_port(portid_t pid)
2284 struct rte_port *port;
2286 if (port_id_is_invalid(pid, ENABLED_WARN))
2289 printf("Closing ports...\n");
2291 RTE_ETH_FOREACH_DEV(pi) {
2292 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2295 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2296 printf("Please remove port %d from forwarding configuration.\n", pi);
2300 if (port_is_bonding_slave(pi)) {
2301 printf("Please remove port %d from bonded device.\n", pi);
2306 if (rte_atomic16_cmpset(&(port->port_status),
2307 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2308 printf("Port %d is already closed\n", pi);
2312 if (rte_atomic16_cmpset(&(port->port_status),
2313 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
printf("Port %d is not stopped\n", pi);
2318 if (port->flow_list)
2319 port_flow_flush(pi);
2320 rte_eth_dev_close(pi);
2322 remove_invalid_ports();
2324 if (rte_atomic16_cmpset(&(port->port_status),
2325 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2326 printf("Port %d cannot be set to closed\n", pi);
2333 reset_port(portid_t pid)
2337 struct rte_port *port;
2339 if (port_id_is_invalid(pid, ENABLED_WARN))
2342 printf("Resetting ports...\n");
2344 RTE_ETH_FOREACH_DEV(pi) {
2345 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2348 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2349 printf("Please remove port %d from forwarding "
2350 "configuration.\n", pi);
2354 if (port_is_bonding_slave(pi)) {
2355 printf("Please remove port %d from bonded device.\n",
2360 diag = rte_eth_dev_reset(pi);
2363 port->need_reconfig = 1;
2364 port->need_reconfig_queues = 1;
2366 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2374 attach_port(char *identifier)
2377 struct rte_dev_iterator iterator;
2379 printf("Attaching a new port...\n");
2381 if (identifier == NULL) {
2382 printf("Invalid parameters are specified\n");
2386 if (rte_dev_probe(identifier) < 0) {
2387 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2391 /* first attach mode: event */
2392 if (setup_on_probe_event) {
2393 /* new ports are detected on RTE_ETH_EVENT_NEW event */
2394 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2395 if (ports[pi].port_status == RTE_PORT_HANDLING &&
2396 ports[pi].need_setup != 0)
2397 setup_attached_port(pi);
2401 /* second attach mode: iterator */
2402 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2403 /* setup ports matching the devargs used for probing */
2404 if (port_is_forwarding(pi))
2405 continue; /* port was already attached before */
2406 setup_attached_port(pi);
2411 setup_attached_port(portid_t pi)
2413 unsigned int socket_id;
2415 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2416 /* if socket_id is invalid, set to the first available socket. */
2417 if (check_socket_id(socket_id) < 0)
2418 socket_id = socket_ids[0];
2419 reconfig(pi, socket_id);
2420 rte_eth_promiscuous_enable(pi);
2422 ports_ids[nb_ports++] = pi;
2423 fwd_ports_ids[nb_fwd_ports++] = pi;
2424 nb_cfg_ports = nb_fwd_ports;
2425 ports[pi].need_setup = 0;
2426 ports[pi].port_status = RTE_PORT_STOPPED;
printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
2433 detach_port_device(portid_t port_id)
2435 struct rte_device *dev;
2438 printf("Removing a device...\n");
2440 dev = rte_eth_devices[port_id].device;
2442 printf("Device already removed\n");
2446 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2447 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2448 printf("Port not stopped\n");
2451 printf("Port was not closed\n");
2452 if (ports[port_id].flow_list)
2453 port_flow_flush(port_id);
2456 if (rte_dev_remove(dev) < 0) {
2457 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
2460 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
2461 /* reset mapping between old ports and removed device */
2462 rte_eth_devices[sibling].device = NULL;
2463 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2464 /* sibling ports are forced to be closed */
2465 ports[sibling].port_status = RTE_PORT_CLOSED;
2466 printf("Port %u is closed\n", sibling);
2470 remove_invalid_ports();
2472 printf("Device of port %u is detached\n", port_id);
2473 printf("Now total ports is %d\n", nb_ports);
2486 stop_packet_forwarding();
2488 for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
2490 if (mp_alloc_type == MP_ALLOC_ANON)
2491 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
2495 if (ports != NULL) {
2497 RTE_ETH_FOREACH_DEV(pt_id) {
2498 printf("\nStopping port %d...\n", pt_id);
2502 RTE_ETH_FOREACH_DEV(pt_id) {
2503 printf("\nShutting down port %d...\n", pt_id);
2510 ret = rte_dev_event_monitor_stop();
2513 "fail to stop device event monitor.");
2517 ret = rte_dev_event_callback_unregister(NULL,
2518 dev_event_callback, NULL);
2521 "fail to unregister device event callback.\n");
2525 ret = rte_dev_hotplug_handle_disable();
2528 "fail to disable hotplug handling.\n");
2532 for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
2534 rte_mempool_free(mempools[i]);
2537 printf("\nBye...\n");
2540 typedef void (*cmd_func_t)(void);
2541 struct pmd_test_command {
2542 const char *cmd_name;
2543 cmd_func_t cmd_func;
2546 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
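/*
 * Editor's sketch: pmd_test_menu, referenced by PMD_TEST_CMD_NB above,
 * pairs a command name with a handler, so an entry looks like the
 * following hypothetical example:
 *
 *	{ "mtest", my_selftest_func },
 */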
2548 /* Check the link status of all ports for up to 9 s, and print the final status */
2550 check_all_ports_link_status(uint32_t port_mask)
2552 #define CHECK_INTERVAL 100 /* 100ms */
2553 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2555 uint8_t count, all_ports_up, print_flag = 0;
2556 struct rte_eth_link link;
2558 printf("Checking link statuses...\n");
2560 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2562 RTE_ETH_FOREACH_DEV(portid) {
2563 if ((port_mask & (1 << portid)) == 0)
2565 memset(&link, 0, sizeof(link));
2566 rte_eth_link_get_nowait(portid, &link);
2567 /* print link status if flag set */
2568 if (print_flag == 1) {
2569 if (link.link_status)
2571 "Port%d Link Up. speed %u Mbps- %s\n",
2572 portid, link.link_speed,
2573 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2574 ("full-duplex") : ("half-duplex\n"));
2576 printf("Port %d Link Down\n", portid);
2579 /* clear all_ports_up flag if any link down */
2580 if (link.link_status == ETH_LINK_DOWN) {
2585 /* after finally printing all link status, get out */
2586 if (print_flag == 1)
2589 if (all_ports_up == 0) {
2591 rte_delay_ms(CHECK_INTERVAL);
2594 /* set the print_flag if all ports up or timeout */
2595 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
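/*
 * Illustrative usage (hedged): callers pass a bitmask of the ports to
 * poll, e.g. to wait on ports 0 and 1 only:
 *
 *	check_all_ports_link_status((1u << 0) | (1u << 1));
 *
 * Note the uint32_t mask limits the check to the first 32 port ids.
 */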
2605 * This callback removes one port of a device. It is limited: it cannot
2606 * handle removal of multiple ports belonging to the same device.
2607 * TODO: the device-detach invocation is planned to move from the user side
2608 * into the EAL, with all PMDs converted to free port resources when the ethdev is closed.
2611 rmv_port_callback(void *arg)
2613 int need_to_start = 0;
2614 int org_no_link_check = no_link_check;
2615 portid_t port_id = (intptr_t)arg;
2617 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2619 if (!test_done && port_is_forwarding(port_id)) {
2621 stop_packet_forwarding();
2625 no_link_check = org_no_link_check;
2626 close_port(port_id);
2627 detach_port_device(port_id);
2629 start_packet_forwarding(0);
2632 /* This function is used by the interrupt thread */
2634 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2637 RTE_SET_USED(param);
2638 RTE_SET_USED(ret_param);
2640 if (type >= RTE_ETH_EVENT_MAX) {
2641 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2642 port_id, __func__, type);
2644 } else if (event_print_mask & (UINT32_C(1) << type)) {
2645 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2646 eth_event_desc[type]);
2651 case RTE_ETH_EVENT_NEW:
2652 ports[port_id].need_setup = 1;
2653 ports[port_id].port_status = RTE_PORT_HANDLING;
2655 case RTE_ETH_EVENT_INTR_RMV:
2656 if (port_id_is_invalid(port_id, DISABLED_WARN))
2658 if (rte_eal_alarm_set(100000,
2659 rmv_port_callback, (void *)(intptr_t)port_id))
2660 fprintf(stderr, "Could not set up deferred device removal\n");
2669 register_eth_event_callback(void)
2672 enum rte_eth_event_type event;
2674 for (event = RTE_ETH_EVENT_UNKNOWN;
2675 event < RTE_ETH_EVENT_MAX; event++) {
2676 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
2681 TESTPMD_LOG(ERR, "Failed to register callback for "
2682 "%s event\n", eth_event_desc[event]);
2690 /* This function is used by the interrupt thread */
2692 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2693 __rte_unused void *arg)
2698 if (type >= RTE_DEV_EVENT_MAX) {
2699 fprintf(stderr, "%s called upon invalid event %d\n",
2705 case RTE_DEV_EVENT_REMOVE:
2706 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
2708 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2710 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
2715 * Because the user's callback is invoked from the EAL interrupt
2716 * callback, the interrupt callback must return before it can be
2717 * unregistered when detaching the device. So the callback finishes
2718 * quickly and defers the actual detach with an alarm. This is a
2719 * workaround: once device detaching is moved into the EAL in the
2720 * future, the deferred removal could
2723 if (rte_eal_alarm_set(100000,
2724 rmv_port_callback, (void *)(intptr_t)port_id))
2726 "Could not set up deferred device removal\n");
2728 case RTE_DEV_EVENT_ADD:
2729 RTE_LOG(DEBUG, EAL, "The device: %s has been added!\n",
2731 /* TODO: after the kernel driver binding is finished,
2732 * begin to attach the port.
2741 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2745 uint8_t mapping_found = 0;
2747 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2748 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2749 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2750 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2751 tx_queue_stats_mappings[i].queue_id,
2752 tx_queue_stats_mappings[i].stats_counter_id);
2759 port->tx_queue_stats_mapping_enabled = 1;
2764 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2768 uint8_t mapping_found = 0;
2770 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2771 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2772 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2773 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2774 rx_queue_stats_mappings[i].queue_id,
2775 rx_queue_stats_mappings[i].stats_counter_id);
2782 port->rx_queue_stats_mapping_enabled = 1;
2787 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2791 diag = set_tx_queue_stats_mapping_registers(pi, port);
2793 if (diag == -ENOTSUP) {
2794 port->tx_queue_stats_mapping_enabled = 0;
2795 printf("TX queue stats mapping not supported port id=%d\n", pi);
2798 rte_exit(EXIT_FAILURE,
2799 "set_tx_queue_stats_mapping_registers "
2800 "failed for port id=%d diag=%d\n",
2804 diag = set_rx_queue_stats_mapping_registers(pi, port);
2806 if (diag == -ENOTSUP) {
2807 port->rx_queue_stats_mapping_enabled = 0;
2808 printf("RX queue stats mapping not supported port id=%d\n", pi);
2811 rte_exit(EXIT_FAILURE,
2812 "set_rx_queue_stats_mapping_registers "
2813 "failed for port id=%d diag=%d\n",
2819 rxtx_port_config(struct rte_port *port)
2824 for (qid = 0; qid < nb_rxq; qid++) {
2825 offloads = port->rx_conf[qid].offloads;
2826 port->rx_conf[qid] = port->dev_info.default_rxconf;
2828 port->rx_conf[qid].offloads = offloads;
2830 /* Check if any Rx parameters have been passed */
2831 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2832 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2834 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2835 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2837 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2838 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2840 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2841 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2843 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2844 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2846 port->nb_rx_desc[qid] = nb_rxd;
2849 for (qid = 0; qid < nb_txq; qid++) {
2850 offloads = port->tx_conf[qid].offloads;
2851 port->tx_conf[qid] = port->dev_info.default_txconf;
2853 port->tx_conf[qid].offloads = offloads;
2855 /* Check if any Tx parameters have been passed */
2856 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2857 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2859 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2860 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2862 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2863 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2865 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2866 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2868 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2869 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2871 port->nb_tx_desc[qid] = nb_txd;
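/*
 * Editor's note (hedged): the RTE_PMD_PARAM_UNSET checks above let the PMD
 * defaults stand unless the user passed the matching testpmd options
 * (--rxpt/--rxht/--rxwt and --txpt/--txht/--txwt for the thresholds,
 * --rxfreet/--txfreet/--txrst for the free/RS thresholds, and
 * --rxd/--txd for the descriptor counts).
 */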
2876 init_port_config(void)
2879 struct rte_port *port;
2881 RTE_ETH_FOREACH_DEV(pid) {
2883 port->dev_conf.fdir_conf = fdir_conf;
2884 rte_eth_dev_info_get(pid, &port->dev_info);
2886 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2887 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2888 rss_hf & port->dev_info.flow_type_rss_offloads;
2890 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2891 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2894 if (port->dcb_flag == 0) {
2895 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2896 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2898 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2901 rxtx_port_config(port);
2903 rte_eth_macaddr_get(pid, &port->eth_addr);
2905 map_port_queue_stats_mapping_registers(pid, port);
2906 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2907 rte_pmd_ixgbe_bypass_init(pid);
2910 if (lsc_interrupt &&
2911 (rte_eth_devices[pid].data->dev_flags &
2912 RTE_ETH_DEV_INTR_LSC))
2913 port->dev_conf.intr_conf.lsc = 1;
2914 if (rmv_interrupt &&
2915 (rte_eth_devices[pid].data->dev_flags &
2916 RTE_ETH_DEV_INTR_RMV))
2917 port->dev_conf.intr_conf.rmv = 1;
2921 void set_port_slave_flag(portid_t slave_pid)
2923 struct rte_port *port;
2925 port = &ports[slave_pid];
2926 port->slave_flag = 1;
2929 void clear_port_slave_flag(portid_t slave_pid)
2931 struct rte_port *port;
2933 port = &ports[slave_pid];
2934 port->slave_flag = 0;
2937 uint8_t port_is_bonding_slave(portid_t slave_pid)
2939 struct rte_port *port;
2941 port = &ports[slave_pid];
2942 if ((rte_eth_devices[slave_pid].data->dev_flags &
2943 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2948 const uint16_t vlan_tags[] = {
2949 0, 1, 2, 3, 4, 5, 6, 7,
2950 8, 9, 10, 11, 12, 13, 14, 15,
2951 16, 17, 18, 19, 20, 21, 22, 23,
2952 24, 25, 26, 27, 28, 29, 30, 31
2956 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2957 enum dcb_mode_enable dcb_mode,
2958 enum rte_eth_nb_tcs num_tcs,
2963 struct rte_eth_rss_conf rss_conf;
2966 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2967 * given above, and the number of traffic classes available for use.
2969 if (dcb_mode == DCB_VT_ENABLED) {
2970 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2971 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2972 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2973 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2975 /* VMDQ+DCB RX and TX configurations */
2976 vmdq_rx_conf->enable_default_pool = 0;
2977 vmdq_rx_conf->default_pool = 0;
2978 vmdq_rx_conf->nb_queue_pools =
2979 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2980 vmdq_tx_conf->nb_queue_pools =
2981 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2983 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2984 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2985 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2986 vmdq_rx_conf->pool_map[i].pools =
2987 1 << (i % vmdq_rx_conf->nb_queue_pools);
2989 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2990 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2991 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2994 /* set DCB mode of RX and TX of multiple queues */
2995 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2996 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2998 struct rte_eth_dcb_rx_conf *rx_conf =
2999 &eth_conf->rx_adv_conf.dcb_rx_conf;
3000 struct rte_eth_dcb_tx_conf *tx_conf =
3001 &eth_conf->tx_adv_conf.dcb_tx_conf;
3003 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3007 rx_conf->nb_tcs = num_tcs;
3008 tx_conf->nb_tcs = num_tcs;
3010 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3011 rx_conf->dcb_tc[i] = i % num_tcs;
3012 tx_conf->dcb_tc[i] = i % num_tcs;
3015 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
3016 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3017 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3021 eth_conf->dcb_capability_en =
3022 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3024 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
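/*
 * Worked example of the "i % num_tcs" mapping used above: with
 * num_tcs == ETH_4_TCS the eight user priorities fold onto traffic
 * classes as
 *
 *	priority: 0 1 2 3 4 5 6 7
 *	TC:       0 1 2 3 0 1 2 3
 *
 * and with ETH_8_TCS the mapping is the identity.
 */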
3030 init_port_dcb_config(portid_t pid,
3031 enum dcb_mode_enable dcb_mode,
3032 enum rte_eth_nb_tcs num_tcs,
3035 struct rte_eth_conf port_conf;
3036 struct rte_port *rte_port;
3040 rte_port = &ports[pid];
3042 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3043 /* Enter DCB configuration status */
3046 port_conf.rxmode = rte_port->dev_conf.rxmode;
3047 port_conf.txmode = rte_port->dev_conf.txmode;
3049 /* Set configuration of DCB in VT mode and DCB in non-VT mode */
3050 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3053 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3055 /* Re-configure the device. */
3056 retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
3059 rte_eth_dev_info_get(pid, &rte_port->dev_info);
3061 /* If dev_info.vmdq_pool_base is greater than 0,
3062 * the queue IDs of the VMDq pools start after the PF queues.
3064 if (dcb_mode == DCB_VT_ENABLED &&
3065 rte_port->dev_info.vmdq_pool_base > 0) {
3066 printf("VMDQ_DCB multi-queue mode is nonsensical"
3067 " for port %d.", pid);
3071 /* Assume all ports in testpmd have the same DCB capability
3072 * and the same number of rxq and txq in DCB mode
3074 if (dcb_mode == DCB_VT_ENABLED) {
3075 if (rte_port->dev_info.max_vfs > 0) {
3076 nb_rxq = rte_port->dev_info.nb_rx_queues;
3077 nb_txq = rte_port->dev_info.nb_tx_queues;
3079 nb_rxq = rte_port->dev_info.max_rx_queues;
3080 nb_txq = rte_port->dev_info.max_tx_queues;
3083 /* If VT is disabled, use all PF queues */
3084 if (rte_port->dev_info.vmdq_pool_base == 0) {
3085 nb_rxq = rte_port->dev_info.max_rx_queues;
3086 nb_txq = rte_port->dev_info.max_tx_queues;
3088 nb_rxq = (queueid_t)num_tcs;
3089 nb_txq = (queueid_t)num_tcs;
3093 rx_free_thresh = 64;
3095 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3097 rxtx_port_config(rte_port);
3099 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3100 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3101 rx_vft_set(pid, vlan_tags[i], 1);
3103 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
3104 map_port_queue_stats_mapping_registers(pid, rte_port);
3106 rte_port->dcb_flag = 1;
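/*
 * Illustrative usage (hedged): in interactive mode this configuration is
 * reached through the CLI, e.g.
 *
 *	port stop 0
 *	port config 0 dcb vt off 4 pfc on
 *	port start 0
 *
 * which requests 4 traffic classes with priority flow control and no VT;
 * the port must be stopped before it can be reconfigured.
 */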
3114 /* Configuration of Ethernet ports. */
3115 ports = rte_zmalloc("testpmd: ports",
3116 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3117 RTE_CACHE_LINE_SIZE);
3118 if (ports == NULL) {
3119 rte_exit(EXIT_FAILURE,
3120 "rte_zmalloc(%d struct rte_port) failed\n",
3124 /* Initialize ports NUMA structures */
3125 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3126 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3127 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3141 const char clr[] = { 27, '[', '2', 'J', '\0' };
3142 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3144 /* Clear screen and move to top left */
3145 printf("%s%s", clr, top_left);
3147 printf("\nPort statistics ====================================");
3148 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3149 nic_stats_display(fwd_ports_ids[i]);
3155 signal_handler(int signum)
3157 if (signum == SIGINT || signum == SIGTERM) {
3158 printf("\nSignal %d received, preparing to exit...\n",
3160 #ifdef RTE_LIBRTE_PDUMP
3161 /* uninitialize packet capture framework */
3164 #ifdef RTE_LIBRTE_LATENCY_STATS
3165 rte_latencystats_uninit();
3168 /* Set flag to indicate forced termination. */
3170 /* exit with the expected status */
3171 signal(signum, SIG_DFL);
3172 kill(getpid(), signum);
3177 main(int argc, char** argv)
3184 signal(SIGINT, signal_handler);
3185 signal(SIGTERM, signal_handler);
3187 diag = rte_eal_init(argc, argv);
3189 rte_panic("Cannot init EAL\n");
3191 testpmd_logtype = rte_log_register("testpmd");
3192 if (testpmd_logtype < 0)
3193 rte_panic("Cannot register log type");
3194 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3196 ret = register_eth_event_callback();
3198 rte_panic("Cannot register for ethdev events");
3200 #ifdef RTE_LIBRTE_PDUMP
3201 /* initialize packet capture framework */
3206 RTE_ETH_FOREACH_DEV(port_id) {
3207 ports_ids[count] = port_id;
3210 nb_ports = (portid_t) count;
3212 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3214 /* allocate port structures, and init them */
3217 set_def_fwd_config();
3219 rte_panic("Empty set of forwarding logical cores - check the "
3220 "core mask supplied in the command parameters\n");
3222 /* Bitrate/latency stats disabled by default */
3223 #ifdef RTE_LIBRTE_BITRATE
3224 bitrate_enabled = 0;
3226 #ifdef RTE_LIBRTE_LATENCY_STATS
3227 latencystats_enabled = 0;
3230 /* on FreeBSD, mlockall() is disabled by default */
3231 #ifdef RTE_EXEC_ENV_FREEBSD
3240 launch_args_parse(argc, argv);
3242 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3243 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3247 if (tx_first && interactive)
3248 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3249 "interactive mode.\n");
3251 if (tx_first && lsc_interrupt) {
3252 printf("Warning: lsc_interrupt needs to be off when "
3253 " using tx_first. Disabling.\n");
3257 if (!nb_rxq && !nb_txq)
3258 printf("Warning: Either rx or tx queues should be non-zero\n");
3260 if (nb_rxq > 1 && nb_rxq > nb_txq)
3261 printf("Warning: nb_rxq=%d enables RSS configuration, "
3262 "but nb_txq=%d will prevent to fully test it.\n",
3268 ret = rte_dev_hotplug_handle_enable();
3271 "fail to enable hotplug handling.");
3275 ret = rte_dev_event_monitor_start();
3278 "fail to start device event monitoring.");
3282 ret = rte_dev_event_callback_register(NULL,
3283 dev_event_callback, NULL);
3286 "fail to register device event callback\n");
3291 if (start_port(RTE_PORT_ALL) != 0)
3292 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3294 /* set all ports to promiscuous mode by default */
3295 RTE_ETH_FOREACH_DEV(port_id)
3296 rte_eth_promiscuous_enable(port_id);
3298 /* Init metrics library */
3299 rte_metrics_init(rte_socket_id());
3301 #ifdef RTE_LIBRTE_LATENCY_STATS
3302 if (latencystats_enabled != 0) {
3303 int ret = rte_latencystats_init(1, NULL);
3305 printf("Warning: latencystats init()"
3306 " returned error %d\n", ret);
3307 printf("Latencystats running on lcore %d\n",
3308 latencystats_lcore_id);
3312 /* Setup bitrate stats */
3313 #ifdef RTE_LIBRTE_BITRATE
3314 if (bitrate_enabled != 0) {
3315 bitrate_data = rte_stats_bitrate_create();
3316 if (bitrate_data == NULL)
3317 rte_exit(EXIT_FAILURE,
3318 "Could not allocate bitrate data.\n");
3319 rte_stats_bitrate_reg(bitrate_data);
3323 #ifdef RTE_LIBRTE_CMDLINE
3324 if (strlen(cmdline_filename) != 0)
3325 cmdline_read_from_file(cmdline_filename);
3327 if (interactive == 1) {
3329 printf("Start automatic packet forwarding\n");
3330 start_packet_forwarding(0);
3342 printf("No commandline core given, start packet forwarding\n");
3343 start_packet_forwarding(tx_first);
3344 if (stats_period != 0) {
3345 uint64_t prev_time = 0, cur_time, diff_time = 0;
3346 uint64_t timer_period;
3348 /* Convert to number of cycles */
3349 timer_period = stats_period * rte_get_timer_hz();
3351 while (f_quit == 0) {
3352 cur_time = rte_get_timer_cycles();
3353 diff_time += cur_time - prev_time;
3355 if (diff_time >= timer_period) {
3357 /* Reset the timer */
3360 /* Sleep to avoid unnecessary checks */
3361 prev_time = cur_time;
3366 printf("Press enter to exit\n");
3367 rc = read(0, &c, 1);