1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
12 #ifndef RTE_EXEC_ENV_WINDOWS
15 #include <sys/types.h>
19 #include <sys/queue.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_byteorder.h>
30 #include <rte_debug.h>
31 #include <rte_cycles.h>
32 #include <rte_memory.h>
33 #include <rte_memcpy.h>
34 #include <rte_launch.h>
36 #include <rte_alarm.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
51 #include <rte_pmd_ixgbe.h>
54 #include <rte_pdump.h>
57 #ifdef RTE_LIB_METRICS
58 #include <rte_metrics.h>
60 #ifdef RTE_LIB_BITRATESTATS
61 #include <rte_bitrate.h>
63 #ifdef RTE_LIB_LATENCYSTATS
64 #include <rte_latencystats.h>
66 #ifdef RTE_EXEC_ENV_WINDOWS
73 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
74 #define HUGE_FLAG (0x40000)
76 #define HUGE_FLAG MAP_HUGETLB
79 #ifndef MAP_HUGE_SHIFT
80 /* older kernels (or FreeBSD) will not have this define */
81 #define HUGE_SHIFT (26)
83 #define HUGE_SHIFT MAP_HUGE_SHIFT
86 #define EXTMEM_HEAP_NAME "extmem"
87 #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
89 uint16_t verbose_level = 0; /**< Silent by default. */
90 int testpmd_logtype; /**< Log type for testpmd logs */
92 /* Use the main core for the command line? */
93 uint8_t interactive = 0;
94 uint8_t auto_start = 0;
96 char cmdline_filename[PATH_MAX] = {0};
99 * NUMA support configuration.
100 * When set, the NUMA support attempts to dispatch the allocation of the
101 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
102 * probed ports among the CPU sockets 0 and 1.
103 * Otherwise, all memory is allocated from CPU socket 0.
105 uint8_t numa_support = 1; /**< numa enabled by default */
108 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
111 uint8_t socket_num = UMA_NO_CONFIG;
114 * Select mempool allocation type:
115 * - native: use regular DPDK memory
116 * - anon: use regular DPDK memory to create mempool, but populate using
117 * anonymous memory (may not be IOVA-contiguous)
118 * - xmem: use externally allocated hugepage memory
120 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
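/*
 * Illustration: the allocation type is normally selected on the testpmd
 * command line (assuming the --mp-alloc option of this build), e.g.:
 *
 *	dpdk-testpmd -l 0-3 -n 4 -- --mp-alloc=xmem
 */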
123 * Store the sockets on which the memory pools used by the ports are allocated.
126 uint8_t port_numa[RTE_MAX_ETHPORTS];
129 * Store the sockets on which the RX rings used by the ports are allocated.
132 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
135 * Store the sockets on which the TX rings used by the ports are allocated.
138 uint8_t txring_numa[RTE_MAX_ETHPORTS];
141 * Record the Ethernet addresses of the peer target ports to which packets are forwarded.
143 * Must be instantiated with the Ethernet addresses of the peer traffic generator ports.
146 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
147 portid_t nb_peer_eth_addrs = 0;
150 * Probed Target Environment.
152 struct rte_port *ports; /**< For all probed ethernet ports. */
153 portid_t nb_ports; /**< Number of probed ethernet ports. */
154 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
155 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
157 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
160 * Test Forwarding Configuration.
161 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
162 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
164 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
165 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
166 portid_t nb_cfg_ports; /**< Number of configured ports. */
167 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
169 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
170 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
172 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
173 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
176 * Forwarding engines.
178 struct fwd_engine * fwd_engines[] = {
188 &five_tuple_swap_fwd_engine,
189 #ifdef RTE_LIBRTE_IEEE1588
190 &ieee1588_fwd_engine,
196 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
197 uint16_t mempool_flags;
199 struct fwd_config cur_fwd_config;
200 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
201 uint32_t retry_enabled;
202 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
203 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
205 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
206 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
207 DEFAULT_MBUF_DATA_SIZE
208 }; /**< Mbuf data space size. */
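/*
 * Multiple mbuf sizes are supplied as a comma-separated list (assuming the
 * --mbuf-size option of this build), e.g.:
 *
 *	dpdk-testpmd -- --mbuf-size=2048,4096
 *
 * which sets mbuf_data_size_n = 2 and creates one mempool per size.
 */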
209 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
210 * specified on command-line. */
211 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
213 /** Extended statistics to show. */
214 struct rte_eth_xstat_name *xstats_display;
216 unsigned int xstats_display_num; /**< Number of extended statistics to show */
219 * In a container, a process running with the 'stats-period' option cannot be terminated.
220 * Set a flag to exit the stats period loop after SIGINT/SIGTERM is received.
225 * Max Rx frame size, set by '--max-pkt-len' parameter.
227 uint32_t max_rx_pkt_len;
230 * Configuration of packet segments used to scatter received packets
231 * if one of the split features is configured.
233 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
234 uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
235 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
236 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
239 * Configuration of packet segments used by the "txonly" processing engine.
241 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
242 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
243 TXONLY_DEF_PACKET_LEN,
245 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
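/*
 * Segment lengths are typically changed at runtime (assuming the interactive
 * "set txpkts" command of this build), e.g.:
 *
 *	testpmd> set txpkts 64,64
 *
 * which makes tx_pkt_nb_segs = 2 and tx_pkt_length = 128.
 */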
247 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
248 /**< Split policy for packets to TX. */
250 uint8_t txonly_multi_flow;
251 /**< Whether multiple flows are generated in TXONLY mode. */
253 uint32_t tx_pkt_times_inter;
254 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
256 uint32_t tx_pkt_times_intra;
257 /**< Timings for send scheduling in TXONLY mode, time between packets. */
259 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
260 uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
261 int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
262 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
264 /* Whether the current configuration is in DCB mode; 0 means not in DCB mode. */
265 uint8_t dcb_config = 0;
268 * Configurable number of RX/TX queues.
270 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
271 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
272 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
275 * Configurable number of RX/TX ring descriptors.
276 * Defaults are supplied by drivers via ethdev.
278 #define RTE_TEST_RX_DESC_DEFAULT 0
279 #define RTE_TEST_TX_DESC_DEFAULT 0
280 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
281 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
283 #define RTE_PMD_PARAM_UNSET -1
285 * Configurable values of RX and TX ring threshold registers.
288 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
289 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
290 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
292 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
293 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
294 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
297 * Configurable value of RX free threshold.
299 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
302 * Configurable value of RX drop enable.
304 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
307 * Configurable value of TX free threshold.
309 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
312 * Configurable value of TX RS bit threshold.
314 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
317 * Configurable number of packets buffered before sending.
319 uint16_t noisy_tx_sw_bufsz;
322 * Configurable value of packet buffer timeout.
324 uint16_t noisy_tx_sw_buf_flush_time;
327 * Configurable value for size of VNF internal memory area
328 * used for simulating noisy neighbour behaviour
330 uint64_t noisy_lkup_mem_sz;
333 * Configurable value of number of random writes done in
334 * VNF simulation memory area.
336 uint64_t noisy_lkup_num_writes;
339 * Configurable value of number of random reads done in
340 * VNF simulation memory area.
342 uint64_t noisy_lkup_num_reads;
345 * Configurable value of number of random reads/writes done in
346 * VNF simulation memory area.
348 uint64_t noisy_lkup_num_reads_writes;
351 * Receive Side Scaling (RSS) configuration.
353 uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
356 * Port topology configuration
358 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
361 * Avoid flushing all the RX streams before starting forwarding.
363 uint8_t no_flush_rx = 0; /* flush by default */
366 * Flow API isolated mode.
368 uint8_t flow_isolate_all;
371 * Avoid checking the link status when starting/stopping a port.
373 uint8_t no_link_check = 0; /* check by default */
376 * Don't automatically start all ports in interactive mode.
378 uint8_t no_device_start = 0;
381 * Enable link status change notification
383 uint8_t lsc_interrupt = 1; /* enabled by default */
386 * Enable device removal notification.
388 uint8_t rmv_interrupt = 1; /* enabled by default */
390 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
392 /* After attach, port setup is called on event or by iterator */
393 bool setup_on_probe_event = true;
395 /* Clear ptypes on port initialization. */
396 uint8_t clear_ptypes = true;
398 /* Hairpin ports configuration mode. */
399 uint16_t hairpin_mode;
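/*
 * Bit meanings, as decoded by setup_hairpin_queues() below:
 *	0x10 - explicit Tx flow rule
 *	0x02 - hairpin ports paired
 *	0x01 - hairpin ports loop
 *	low nibble zero - each port is its own hairpin peer
 */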
401 /* Pretty printing of ethdev events */
402 static const char * const eth_event_desc[] = {
403 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
404 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
405 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
406 [RTE_ETH_EVENT_INTR_RESET] = "reset",
407 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
408 [RTE_ETH_EVENT_IPSEC] = "IPsec",
409 [RTE_ETH_EVENT_MACSEC] = "MACsec",
410 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
411 [RTE_ETH_EVENT_NEW] = "device probed",
412 [RTE_ETH_EVENT_DESTROY] = "device released",
413 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
414 [RTE_ETH_EVENT_MAX] = NULL,
418 * Display or mask ethdev events.
419 * Defaults to all events except VF_MBOX.
421 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
422 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
423 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
424 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
425 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
426 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
427 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
428 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
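/*
 * Illustrative sketch (the helper name is hypothetical and is not used
 * elsewhere in this file): whether events of a given type are printed
 * reduces to testing the corresponding bit of event_print_mask.
 */
static inline int
eth_event_is_printed(enum rte_eth_event_type type)
{
	return (event_print_mask & (UINT32_C(1) << type)) != 0;
}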
430 * Decide whether all memory is locked for performance.
435 * NIC bypass mode configuration options.
438 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
439 /* The NIC bypass watchdog timeout. */
440 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
444 #ifdef RTE_LIB_LATENCYSTATS
447 * Set when latency stats are enabled on the command line.
449 uint8_t latencystats_enabled;
452 * Lcore ID to serve latency statistics.
454 lcoreid_t latencystats_lcore_id = -1;
459 * Ethernet device configuration.
461 struct rte_eth_rxmode rx_mode;
463 struct rte_eth_txmode tx_mode = {
464 .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
467 struct rte_eth_fdir_conf fdir_conf = {
468 .mode = RTE_FDIR_MODE_NONE,
469 .pballoc = RTE_ETH_FDIR_PBALLOC_64K,
470 .status = RTE_FDIR_REPORT_STATUS,
472 .vlan_tci_mask = 0xFFEF,
474 .src_ip = 0xFFFFFFFF,
475 .dst_ip = 0xFFFFFFFF,
478 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
479 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
481 .src_port_mask = 0xFFFF,
482 .dst_port_mask = 0xFFFF,
483 .mac_addr_byte_mask = 0xFF,
484 .tunnel_type_mask = 1,
485 .tunnel_id_mask = 0xFFFFFFFF,
490 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
493 * Display zero values by default for xstats
495 uint8_t xstats_hide_zero;
498 * Measurement of CPU cycles disabled by default
500 uint8_t record_core_cycles;
503 * Display of RX and TX bursts disabled by default
505 uint8_t record_burst_stats;
508 * Number of ports per shared Rx queue group; 0 disables it.
512 unsigned int num_sockets = 0;
513 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
515 #ifdef RTE_LIB_BITRATESTATS
516 /* Bitrate statistics */
517 struct rte_stats_bitrates *bitrate_data;
518 lcoreid_t bitrate_lcore_id;
519 uint8_t bitrate_enabled;
523 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
524 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
528 * Hexadecimal bitmask of the RX mq modes that can be enabled.
530 enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
533 * Used to set forced link speed
535 uint32_t eth_link_speed;
538 * ID of the current process in multi-process, used to
539 * configure the queues to be polled.
544 * Number of processes in multi-process, used to
545 * configure the queues to be polled.
547 unsigned int num_procs = 1;
550 eth_rx_metadata_negotiate_mp(uint16_t port_id)
552 uint64_t rx_meta_features = 0;
555 if (!is_proc_primary())
558 rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
559 rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
560 rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
562 ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
564 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
565 TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
569 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
570 TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
574 if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
575 TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
578 } else if (ret != -ENOTSUP) {
579 rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
580 port_id, rte_strerror(-ret));
585 eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
586 const struct rte_eth_conf *dev_conf)
588 if (is_proc_primary())
589 return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
595 eth_dev_start_mp(uint16_t port_id)
597 if (is_proc_primary())
598 return rte_eth_dev_start(port_id);
604 eth_dev_stop_mp(uint16_t port_id)
606 if (is_proc_primary())
607 return rte_eth_dev_stop(port_id);
613 mempool_free_mp(struct rte_mempool *mp)
615 if (is_proc_primary())
616 rte_mempool_free(mp);
620 eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
622 if (is_proc_primary())
623 return rte_eth_dev_set_mtu(port_id, mtu);
628 /* Forward function declarations */
629 static void setup_attached_port(portid_t pi);
630 static void check_all_ports_link_status(uint32_t port_mask);
631 static int eth_event_callback(portid_t port_id,
632 enum rte_eth_event_type type,
633 void *param, void *ret_param);
634 static void dev_event_callback(const char *device_name,
635 enum rte_dev_event_type type,
637 static void fill_xstats_display_info(void);
640 * Check if all the ports are started.
641 * If yes, return positive value. If not, return zero.
643 static int all_ports_started(void);
646 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
647 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
650 /* Holds the registered mbuf dynamic flag names. */
651 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
655 * Helper function to check whether a socket has not been discovered yet.
656 * Return a positive value if the socket is new, zero if it is already known.
659 new_socket_id(unsigned int socket_id)
663 for (i = 0; i < num_sockets; i++) {
664 if (socket_ids[i] == socket_id)
671 * Setup default configuration.
674 set_default_fwd_lcores_config(void)
678 unsigned int sock_num;
681 for (i = 0; i < RTE_MAX_LCORE; i++) {
682 if (!rte_lcore_is_enabled(i))
684 sock_num = rte_lcore_to_socket_id(i);
685 if (new_socket_id(sock_num)) {
686 if (num_sockets >= RTE_MAX_NUMA_NODES) {
687 rte_exit(EXIT_FAILURE,
688 "Total sockets greater than %u\n",
691 socket_ids[num_sockets++] = sock_num;
693 if (i == rte_get_main_lcore())
695 fwd_lcores_cpuids[nb_lc++] = i;
697 nb_lcores = (lcoreid_t) nb_lc;
698 nb_cfg_lcores = nb_lcores;
703 set_def_peer_eth_addrs(void)
707 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
708 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
709 peer_eth_addrs[i].addr_bytes[5] = i;
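		/* e.g. port 3 gets the locally administered address 02:00:00:00:00:03 */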
714 set_default_fwd_ports_config(void)
719 RTE_ETH_FOREACH_DEV(pt_id) {
720 fwd_ports_ids[i++] = pt_id;
722 /* Update sockets info according to the attached device */
723 int socket_id = rte_eth_dev_socket_id(pt_id);
724 if (socket_id >= 0 && new_socket_id(socket_id)) {
725 if (num_sockets >= RTE_MAX_NUMA_NODES) {
726 rte_exit(EXIT_FAILURE,
727 "Total sockets greater than %u\n",
730 socket_ids[num_sockets++] = socket_id;
734 nb_cfg_ports = nb_ports;
735 nb_fwd_ports = nb_ports;
739 set_def_fwd_config(void)
741 set_default_fwd_lcores_config();
742 set_def_peer_eth_addrs();
743 set_default_fwd_ports_config();
746 #ifndef RTE_EXEC_ENV_WINDOWS
747 /* extremely pessimistic estimation of memory required to create a mempool */
749 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
751 unsigned int n_pages, mbuf_per_pg, leftover;
752 uint64_t total_mem, mbuf_mem, obj_sz;
754 /* there is no good way to predict how much space the mempool will
755 * occupy because it will allocate chunks on the fly, and some of those
756 * will come from default DPDK memory while some will come from our
757 * external memory, so just assume 128MB will be enough for everyone.
759 uint64_t hdr_mem = 128 << 20;
761 /* account for possible non-contiguousness */
762 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
764 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
768 mbuf_per_pg = pgsz / obj_sz;
769 leftover = (nb_mbufs % mbuf_per_pg) > 0;
770 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
772 mbuf_mem = n_pages * pgsz;
774 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
776 if (total_mem > SIZE_MAX) {
777 TESTPMD_LOG(ERR, "Memory size too big\n");
780 *out = (size_t)total_mem;
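	/*
	 * Worked example (assuming obj_sz comes out at exactly 2 KB on 2 MB
	 * pages): nb_mbufs = 1024 fits in a single page, so mbuf_mem = 2 MB
	 * and the result is RTE_ALIGN(128 MB + 2 MB, 2 MB) = 130 MB.
	 */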
786 pagesz_flags(uint64_t page_sz)
788 /* as per mmap() manpage, all page sizes are log2 of page size
789 * shifted by MAP_HUGE_SHIFT
791 int log2 = rte_log2_u64(page_sz);
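	/*
	 * e.g. for 1 GB pages, rte_log2_u64(1 << 30) = 30 and the result is
	 * 30 << 26, matching the kernel's MAP_HUGE_1GB encoding.
	 */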
793 return (log2 << HUGE_SHIFT);
797 alloc_mem(size_t memsz, size_t pgsz, bool huge)
802 /* allocate anonymous hugepages */
803 flags = MAP_ANONYMOUS | MAP_PRIVATE;
805 flags |= HUGE_FLAG | pagesz_flags(pgsz);
807 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
808 if (addr == MAP_FAILED)
814 struct extmem_param {
818 rte_iova_t *iova_table;
819 unsigned int iova_table_len;
823 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
826 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
827 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
828 unsigned int cur_page, n_pages, pgsz_idx;
829 size_t mem_sz, cur_pgsz;
830 rte_iova_t *iovas = NULL;
834 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
835 /* skip anything that is too big */
836 if (pgsizes[pgsz_idx] > SIZE_MAX)
839 cur_pgsz = pgsizes[pgsz_idx];
841 /* if we were told not to allocate hugepages, override */
843 cur_pgsz = sysconf(_SC_PAGESIZE);
845 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
847 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
851 /* allocate our memory */
852 addr = alloc_mem(mem_sz, cur_pgsz, huge);
854 /* if we couldn't allocate memory with a specified page size,
855 * that doesn't mean we can't do it with other page sizes, so try another one.
861 /* store IOVA addresses for every page in this memory area */
862 n_pages = mem_sz / cur_pgsz;
864 iovas = malloc(sizeof(*iovas) * n_pages);
867 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
870 /* lock memory if it's not huge pages */
874 /* populate IOVA addresses */
875 for (cur_page = 0; cur_page < n_pages; cur_page++) {
880 offset = cur_pgsz * cur_page;
881 cur = RTE_PTR_ADD(addr, offset);
883 /* touch the page before getting its IOVA */
884 *(volatile char *)cur = 0;
886 iova = rte_mem_virt2iova(cur);
888 iovas[cur_page] = iova;
893 /* if we couldn't allocate anything */
899 param->pgsz = cur_pgsz;
900 param->iova_table = iovas;
901 param->iova_table_len = n_pages;
908 munmap(addr, mem_sz);
914 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
916 struct extmem_param param;
919 memset(&param, 0, sizeof(param));
921 /* check if our heap exists */
922 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
924 /* create our heap */
925 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
927 TESTPMD_LOG(ERR, "Cannot create heap\n");
932 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
934 TESTPMD_LOG(ERR, "Cannot create memory area\n");
938 /* we now have a valid memory area, so add it to heap */
939 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
940 param.addr, param.len, param.iova_table,
941 param.iova_table_len, param.pgsz);
943 /* when using VFIO, memory is automatically mapped for DMA by EAL */
945 /* not needed any more */
946 free(param.iova_table);
949 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
950 munmap(param.addr, param.len);
956 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
962 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
963 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
968 RTE_ETH_FOREACH_DEV(pid) {
969 struct rte_eth_dev_info dev_info;
971 ret = eth_dev_info_get_print_err(pid, &dev_info);
974 "unable to get device info for port %d on addr 0x%p,"
975 "mempool unmapping will not be performed\n",
980 ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
983 "unable to DMA unmap addr 0x%p "
985 memhdr->addr, dev_info.device->name);
988 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
991 "unable to un-register addr 0x%p\n", memhdr->addr);
996 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
997 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
1000 size_t page_size = sysconf(_SC_PAGESIZE);
1003 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
1007 "unable to register addr 0x%p\n", memhdr->addr);
1010 RTE_ETH_FOREACH_DEV(pid) {
1011 struct rte_eth_dev_info dev_info;
1013 ret = eth_dev_info_get_print_err(pid, &dev_info);
1016 "unable to get device info for port %d on addr 0x%p,"
1017 "mempool mapping will not be performed\n",
1021 ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
1024 "unable to DMA map addr 0x%p "
1026 memhdr->addr, dev_info.device->name);
1033 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
1034 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
1036 struct rte_pktmbuf_extmem *xmem;
1037 unsigned int ext_num, zone_num, elt_num;
1040 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
1041 elt_num = EXTBUF_ZONE_SIZE / elt_size;
1042 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
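	/*
	 * Worked example (assuming a 64-byte cache line): mbuf_sz = 2176 is
	 * already cache-line aligned, so elt_num = 2 MB / 2176 = 963 and
	 * nb_mbufs = 100000 requires zone_num = 104 memzones.
	 */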
1044 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
1046 TESTPMD_LOG(ERR, "Cannot allocate memory for "
1047 "external buffer descriptors\n");
1051 for (ext_num = 0; ext_num < zone_num; ext_num++) {
1052 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
1053 const struct rte_memzone *mz;
1054 char mz_name[RTE_MEMZONE_NAMESIZE];
1057 ret = snprintf(mz_name, sizeof(mz_name),
1058 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
1059 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
1060 errno = ENAMETOOLONG;
1064 mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
1066 RTE_MEMZONE_IOVA_CONTIG |
1068 RTE_MEMZONE_SIZE_HINT_ONLY,
1072 * The caller exits on external buffer creation
1073 * error, so there is no need to free memzones.
1079 xseg->buf_ptr = mz->addr;
1080 xseg->buf_iova = mz->iova;
1081 xseg->buf_len = EXTBUF_ZONE_SIZE;
1082 xseg->elt_size = elt_size;
1084 if (ext_num == 0 && xmem != NULL) {
1093 * Configuration initialisation done once at init time.
1095 static struct rte_mempool *
1096 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
1097 unsigned int socket_id, uint16_t size_idx)
1099 char pool_name[RTE_MEMPOOL_NAMESIZE];
1100 struct rte_mempool *rte_mp = NULL;
1101 #ifndef RTE_EXEC_ENV_WINDOWS
1104 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1106 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1107 if (!is_proc_primary()) {
1108 rte_mp = rte_mempool_lookup(pool_name);
1110 rte_exit(EXIT_FAILURE,
1111 "Get mbuf pool for socket %u failed: %s\n",
1112 socket_id, rte_strerror(rte_errno));
1117 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1118 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1120 switch (mp_alloc_type) {
1121 case MP_ALLOC_NATIVE:
1123 /* wrapper to rte_mempool_create() */
1124 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1125 rte_mbuf_best_mempool_ops());
1126 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1127 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1130 #ifndef RTE_EXEC_ENV_WINDOWS
1133 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1134 mb_size, (unsigned int) mb_mempool_cache,
1135 sizeof(struct rte_pktmbuf_pool_private),
1136 socket_id, mempool_flags);
1140 if (rte_mempool_populate_anon(rte_mp) == 0) {
1141 rte_mempool_free(rte_mp);
1145 rte_pktmbuf_pool_init(rte_mp, NULL);
1146 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1147 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1151 case MP_ALLOC_XMEM_HUGE:
1154 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1156 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1157 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1160 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1161 if (heap_socket < 0)
1162 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1164 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1165 rte_mbuf_best_mempool_ops());
1166 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1167 mb_mempool_cache, 0, mbuf_seg_size,
1174 struct rte_pktmbuf_extmem *ext_mem;
1175 unsigned int ext_num;
1177 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1178 socket_id, pool_name, &ext_mem);
1180 rte_exit(EXIT_FAILURE,
1181 "Can't create pinned data buffers\n");
1183 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1184 rte_mbuf_best_mempool_ops());
1185 rte_mp = rte_pktmbuf_pool_create_extbuf
1186 (pool_name, nb_mbuf, mb_mempool_cache,
1187 0, mbuf_seg_size, socket_id,
1194 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1198 #ifndef RTE_EXEC_ENV_WINDOWS
1201 if (rte_mp == NULL) {
1202 rte_exit(EXIT_FAILURE,
1203 "Creation of mbuf pool for socket %u failed: %s\n",
1204 socket_id, rte_strerror(rte_errno));
1205 } else if (verbose_level > 0) {
1206 rte_mempool_dump(stdout, rte_mp);
1212 * Check whether the given socket ID is valid in NUMA mode.
1213 * Return 0 if valid, -1 otherwise.
1216 check_socket_id(const unsigned int socket_id)
1218 static int warning_once = 0;
1220 if (new_socket_id(socket_id)) {
1221 if (!warning_once && numa_support)
1223 "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
1231 * Get the allowed maximum number of RX queues.
1232 * *pid returns the port ID with the minimal value of
1233 * max_rx_queues among all ports.
1236 get_allowed_max_nb_rxq(portid_t *pid)
1238 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1239 bool max_rxq_valid = false;
1241 struct rte_eth_dev_info dev_info;
1243 RTE_ETH_FOREACH_DEV(pi) {
1244 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1247 max_rxq_valid = true;
1248 if (dev_info.max_rx_queues < allowed_max_rxq) {
1249 allowed_max_rxq = dev_info.max_rx_queues;
1253 return max_rxq_valid ? allowed_max_rxq : 0;
1257 * Check whether the input rxq is valid.
1258 * The input rxq is valid if it is not greater than the maximum number
1259 * of RX queues of any port.
1260 * Return 0 if valid, -1 otherwise.
1263 check_nb_rxq(queueid_t rxq)
1265 queueid_t allowed_max_rxq;
1268 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1269 if (rxq > allowed_max_rxq) {
1271 "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
1272 rxq, allowed_max_rxq, pid);
1279 * Get the allowed maximum number of TX queues.
1280 * *pid returns the port ID with the minimal value of
1281 * max_tx_queues among all ports.
1284 get_allowed_max_nb_txq(portid_t *pid)
1286 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1287 bool max_txq_valid = false;
1289 struct rte_eth_dev_info dev_info;
1291 RTE_ETH_FOREACH_DEV(pi) {
1292 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1295 max_txq_valid = true;
1296 if (dev_info.max_tx_queues < allowed_max_txq) {
1297 allowed_max_txq = dev_info.max_tx_queues;
1301 return max_txq_valid ? allowed_max_txq : 0;
1305 * Check whether the input txq is valid.
1306 * The input txq is valid if it is not greater than the maximum number
1307 * of TX queues of any port.
1308 * Return 0 if valid, -1 otherwise.
1311 check_nb_txq(queueid_t txq)
1313 queueid_t allowed_max_txq;
1316 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1317 if (txq > allowed_max_txq) {
1319 "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
1320 txq, allowed_max_txq, pid);
1327 * Get the allowed maximum number of RXDs of every Rx queue.
1328 * *pid returns the port ID with the minimal value of
1329 * max_rxd among all queues of all ports.
1332 get_allowed_max_nb_rxd(portid_t *pid)
1334 uint16_t allowed_max_rxd = UINT16_MAX;
1336 struct rte_eth_dev_info dev_info;
1338 RTE_ETH_FOREACH_DEV(pi) {
1339 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1342 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1343 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1347 return allowed_max_rxd;
1351 * Get the allowed minimal number of RXDs of every Rx queue.
1352 * *pid returns the port ID with the highest value of
1353 * min_rxd among all queues of all ports.
1356 get_allowed_min_nb_rxd(portid_t *pid)
1358 uint16_t allowed_min_rxd = 0;
1360 struct rte_eth_dev_info dev_info;
1362 RTE_ETH_FOREACH_DEV(pi) {
1363 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1366 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1367 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1372 return allowed_min_rxd;
1376 * Check whether the input rxd is valid.
1377 * The input rxd is valid if it is not greater than the maximum number
1378 * of RXDs of every Rx queue and not less than the minimal number
1379 * of RXDs of every Rx queue.
1380 * Return 0 if valid, -1 otherwise.
1383 check_nb_rxd(queueid_t rxd)
1385 uint16_t allowed_max_rxd;
1386 uint16_t allowed_min_rxd;
1389 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1390 if (rxd > allowed_max_rxd) {
1392 "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
1393 rxd, allowed_max_rxd, pid);
1397 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1398 if (rxd < allowed_min_rxd) {
1400 "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
1401 rxd, allowed_min_rxd, pid);
1409 * Get the allowed maximum number of TXDs of every Tx queue.
1410 * *pid returns the port ID with the minimal value of
1411 * max_txd among all queues of all ports.
1414 get_allowed_max_nb_txd(portid_t *pid)
1416 uint16_t allowed_max_txd = UINT16_MAX;
1418 struct rte_eth_dev_info dev_info;
1420 RTE_ETH_FOREACH_DEV(pi) {
1421 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1424 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1425 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1429 return allowed_max_txd;
1433 * Get the allowed minimal number of TXDs of every Tx queue.
1434 * *pid returns the port ID with the highest value of
1435 * min_txd among all queues of all ports.
1438 get_allowed_min_nb_txd(portid_t *pid)
1440 uint16_t allowed_min_txd = 0;
1442 struct rte_eth_dev_info dev_info;
1444 RTE_ETH_FOREACH_DEV(pi) {
1445 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1448 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1449 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1454 return allowed_min_txd;
1458 * Check whether the input txd is valid.
1459 * The input txd is valid if it is not greater than the maximum number of
1460 * TXDs of every Tx queue and not less than the minimal number of TXDs.
1461 * Return 0 if valid, -1 otherwise.
1464 check_nb_txd(queueid_t txd)
1466 uint16_t allowed_max_txd;
1467 uint16_t allowed_min_txd;
1470 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1471 if (txd > allowed_max_txd) {
1473 "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
1474 txd, allowed_max_txd, pid);
1478 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1479 if (txd < allowed_min_txd) {
1481 "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
1482 txd, allowed_min_txd, pid);
1490 * Get the allowed maximum number of hairpin queues.
1491 * *pid returns the port ID with the minimal value of
1492 * max_hairpin_queues among all ports.
1495 get_allowed_max_nb_hairpinq(portid_t *pid)
1497 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1499 struct rte_eth_hairpin_cap cap;
1501 RTE_ETH_FOREACH_DEV(pi) {
1502 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1506 if (cap.max_nb_queues < allowed_max_hairpinq) {
1507 allowed_max_hairpinq = cap.max_nb_queues;
1511 return allowed_max_hairpinq;
1515 * Check whether the input hairpinq is valid.
1516 * The input hairpinq is valid if it is not greater than the maximum
1517 * number of hairpin queues of any port.
1518 * Return 0 if valid, -1 otherwise.
1521 check_nb_hairpinq(queueid_t hairpinq)
1523 queueid_t allowed_max_hairpinq;
1526 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1527 if (hairpinq > allowed_max_hairpinq) {
1529 "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
1530 hairpinq, allowed_max_hairpinq, pid);
1537 get_eth_overhead(struct rte_eth_dev_info *dev_info)
1539 uint32_t eth_overhead;
1541 if (dev_info->max_mtu != UINT16_MAX &&
1542 dev_info->max_rx_pktlen > dev_info->max_mtu)
1543 eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
1545 eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1547 return eth_overhead;
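	/*
	 * For example, a device reporting max_rx_pktlen = 1518 and
	 * max_mtu = 1500 yields an overhead of 18 bytes, the same as the
	 * RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) fallback.
	 */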
1551 init_config_port_offloads(portid_t pid, uint32_t socket_id)
1553 struct rte_port *port = &ports[pid];
1557 eth_rx_metadata_negotiate_mp(pid);
1559 port->dev_conf.txmode = tx_mode;
1560 port->dev_conf.rxmode = rx_mode;
1562 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1564 rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1566 if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
1567 port->dev_conf.txmode.offloads &=
1568 ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1570 /* Apply Rx offloads configuration */
1571 for (i = 0; i < port->dev_info.max_rx_queues; i++)
1572 port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
1573 /* Apply Tx offloads configuration */
1574 for (i = 0; i < port->dev_info.max_tx_queues; i++)
1575 port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
1578 port->dev_conf.link_speeds = eth_link_speed;
1581 port->dev_conf.rxmode.mtu = max_rx_pkt_len -
1582 get_eth_overhead(&port->dev_info);
1584 /* set flag to initialize port/queue */
1585 port->need_reconfig = 1;
1586 port->need_reconfig_queues = 1;
1587 port->socket_id = socket_id;
1588 port->tx_metadata = 0;
1591 * Check the maximum number of segments per MTU and
1592 * update the mbuf data size accordingly.
1594 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1595 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1596 uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
1599 if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
1600 uint16_t data_size = (mtu + eth_overhead) /
1601 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1602 uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
1604 if (buffer_size > mbuf_data_size[0]) {
1605 mbuf_data_size[0] = buffer_size;
1606 TESTPMD_LOG(WARNING,
1607 "Configured mbuf size of the first segment %hu\n",
1618 struct rte_mempool *mbp;
1619 unsigned int nb_mbuf_per_pool;
1622 struct rte_gro_param gro_param;
1628 /* Configuration of logical cores. */
1629 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1630 sizeof(struct fwd_lcore *) * nb_lcores,
1631 RTE_CACHE_LINE_SIZE);
1632 if (fwd_lcores == NULL) {
1633 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1634 "failed\n", nb_lcores);
1636 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1637 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1638 sizeof(struct fwd_lcore),
1639 RTE_CACHE_LINE_SIZE);
1640 if (fwd_lcores[lc_id] == NULL) {
1641 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1644 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1647 RTE_ETH_FOREACH_DEV(pid) {
1651 socket_id = port_numa[pid];
1652 if (port_numa[pid] == NUMA_NO_CONFIG) {
1653 socket_id = rte_eth_dev_socket_id(pid);
1656 * if socket_id is invalid,
1657 * set to the first available socket.
1659 if (check_socket_id(socket_id) < 0)
1660 socket_id = socket_ids[0];
1663 socket_id = (socket_num == UMA_NO_CONFIG) ?
1666 /* Apply default TxRx configuration for all ports */
1667 init_config_port_offloads(pid, socket_id);
1670 * Create pools of mbuf.
1671 * If NUMA support is disabled, create a single pool of mbuf in
1672 * socket 0 memory by default.
1673 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1675 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1676 * nb_txd can be configured at run time.
1678 if (param_total_num_mbufs)
1679 nb_mbuf_per_pool = param_total_num_mbufs;
1681 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1682 (nb_lcores * mb_mempool_cache) +
1683 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1684 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1690 for (i = 0; i < num_sockets; i++)
1691 for (j = 0; j < mbuf_data_size_n; j++)
1692 mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1693 mbuf_pool_create(mbuf_data_size[j],
1699 for (i = 0; i < mbuf_data_size_n; i++)
1700 mempools[i] = mbuf_pool_create
1703 socket_num == UMA_NO_CONFIG ?
1710 gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1711 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
1714 * Record which mbuf pool each logical core should use, if needed.
1716 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1717 mbp = mbuf_pool_find(
1718 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1721 mbp = mbuf_pool_find(0, 0);
1722 fwd_lcores[lc_id]->mbp = mbp;
1724 /* initialize GSO context */
1725 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1726 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1727 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1728 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1730 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1737 /* create a gro context for each lcore */
1738 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1739 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1740 gro_param.max_item_per_flow = MAX_PKT_BURST;
1741 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1742 gro_param.socket_id = rte_lcore_to_socket_id(
1743 fwd_lcores_cpuids[lc_id]);
1744 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1745 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1746 rte_exit(EXIT_FAILURE,
1747 "rte_gro_ctx_create() failed\n");
1755 reconfig(portid_t new_port_id, unsigned socket_id)
1757 /* Reconfiguration of Ethernet ports. */
1758 init_config_port_offloads(new_port_id, socket_id);
1764 init_fwd_streams(void)
1767 struct rte_port *port;
1768 streamid_t sm_id, nb_fwd_streams_new;
1771 /* Set the socket ID according to NUMA mode. */
1772 RTE_ETH_FOREACH_DEV(pid) {
1774 if (nb_rxq > port->dev_info.max_rx_queues) {
1776 "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
1777 nb_rxq, port->dev_info.max_rx_queues);
1780 if (nb_txq > port->dev_info.max_tx_queues) {
1782 "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
1783 nb_txq, port->dev_info.max_tx_queues);
1787 if (port_numa[pid] != NUMA_NO_CONFIG)
1788 port->socket_id = port_numa[pid];
1790 port->socket_id = rte_eth_dev_socket_id(pid);
1793 * if socket_id is invalid,
1794 * set to the first available socket.
1796 if (check_socket_id(port->socket_id) < 0)
1797 port->socket_id = socket_ids[0];
1801 if (socket_num == UMA_NO_CONFIG)
1802 port->socket_id = 0;
1804 port->socket_id = socket_num;
1808 q = RTE_MAX(nb_rxq, nb_txq);
1811 "Fail: Cannot allocate fwd streams as number of queues is 0\n");
1814 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1815 if (nb_fwd_streams_new == nb_fwd_streams)
1818 if (fwd_streams != NULL) {
1819 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1820 if (fwd_streams[sm_id] == NULL)
1822 rte_free(fwd_streams[sm_id]);
1823 fwd_streams[sm_id] = NULL;
1825 rte_free(fwd_streams);
1830 nb_fwd_streams = nb_fwd_streams_new;
1831 if (nb_fwd_streams) {
1832 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1833 sizeof(struct fwd_stream *) * nb_fwd_streams,
1834 RTE_CACHE_LINE_SIZE);
1835 if (fwd_streams == NULL)
1836 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1837 " (struct fwd_stream *)) failed\n",
1840 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1841 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1842 " struct fwd_stream", sizeof(struct fwd_stream),
1843 RTE_CACHE_LINE_SIZE);
1844 if (fwd_streams[sm_id] == NULL)
1845 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1846 "(struct fwd_stream) failed\n");
1854 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1856 uint64_t total_burst, sburst;
1858 uint64_t burst_stats[4];
1859 uint16_t pktnb_stats[4];
1861 int burst_percent[4], sburstp;
1865 * First compute the total number of packet bursts and the
1866 * two highest numbers of bursts of the same number of packets.
1868 memset(&burst_stats, 0x0, sizeof(burst_stats));
1869 memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1871 /* Show stats for 0 burst size always */
1872 total_burst = pbs->pkt_burst_spread[0];
1873 burst_stats[0] = pbs->pkt_burst_spread[0];
1876 /* Find the next 2 burst sizes with highest occurrences. */
1877 for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
1878 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1883 total_burst += nb_burst;
1885 if (nb_burst > burst_stats[1]) {
1886 burst_stats[2] = burst_stats[1];
1887 pktnb_stats[2] = pktnb_stats[1];
1888 burst_stats[1] = nb_burst;
1889 pktnb_stats[1] = nb_pkt;
1890 } else if (nb_burst > burst_stats[2]) {
1891 burst_stats[2] = nb_burst;
1892 pktnb_stats[2] = nb_pkt;
1895 if (total_burst == 0)
1898 printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1899 for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1901 printf("%d%% of other]\n", 100 - sburstp);
1905 sburst += burst_stats[i];
1906 if (sburst == total_burst) {
1907 printf("%d%% of %d pkts]\n",
1908 100 - sburstp, (int) pktnb_stats[i]);
1913 (double)burst_stats[i] / total_burst * 100;
1914 printf("%d%% of %d pkts + ",
1915 burst_percent[i], (int) pktnb_stats[i]);
1916 sburstp += burst_percent[i];
1921 fwd_stream_stats_display(streamid_t stream_id)
1923 struct fwd_stream *fs;
1924 static const char *fwd_top_stats_border = "-------";
1926 fs = fwd_streams[stream_id];
1927 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1928 (fs->fwd_dropped == 0))
1930 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1931 "TX Port=%2d/Queue=%2d %s\n",
1932 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1933 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1934 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1935 " TX-dropped: %-14"PRIu64,
1936 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1938 /* if checksum mode */
1939 if (cur_fwd_eng == &csum_fwd_engine) {
1940 printf(" RX- bad IP checksum: %-14"PRIu64
1941 " Rx- bad L4 checksum: %-14"PRIu64
1942 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1943 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1944 fs->rx_bad_outer_l4_csum);
1945 printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
1946 fs->rx_bad_outer_ip_csum);
1951 if (record_burst_stats) {
1952 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1953 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1958 fwd_stats_display(void)
1960 static const char *fwd_stats_border = "----------------------";
1961 static const char *acc_stats_border = "+++++++++++++++";
1963 struct fwd_stream *rx_stream;
1964 struct fwd_stream *tx_stream;
1965 uint64_t tx_dropped;
1966 uint64_t rx_bad_ip_csum;
1967 uint64_t rx_bad_l4_csum;
1968 uint64_t rx_bad_outer_l4_csum;
1969 uint64_t rx_bad_outer_ip_csum;
1970 } ports_stats[RTE_MAX_ETHPORTS];
1971 uint64_t total_rx_dropped = 0;
1972 uint64_t total_tx_dropped = 0;
1973 uint64_t total_rx_nombuf = 0;
1974 struct rte_eth_stats stats;
1975 uint64_t fwd_cycles = 0;
1976 uint64_t total_recv = 0;
1977 uint64_t total_xmit = 0;
1978 struct rte_port *port;
1983 memset(ports_stats, 0, sizeof(ports_stats));
1985 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1986 struct fwd_stream *fs = fwd_streams[sm_id];
1988 if (cur_fwd_config.nb_fwd_streams >
1989 cur_fwd_config.nb_fwd_ports) {
1990 fwd_stream_stats_display(sm_id);
1992 ports_stats[fs->tx_port].tx_stream = fs;
1993 ports_stats[fs->rx_port].rx_stream = fs;
1996 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1998 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1999 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
2000 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
2001 fs->rx_bad_outer_l4_csum;
2002 ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2003 fs->rx_bad_outer_ip_csum;
2005 if (record_core_cycles)
2006 fwd_cycles += fs->core_cycles;
2008 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2009 pt_id = fwd_ports_ids[i];
2010 port = &ports[pt_id];
2012 rte_eth_stats_get(pt_id, &stats);
2013 stats.ipackets -= port->stats.ipackets;
2014 stats.opackets -= port->stats.opackets;
2015 stats.ibytes -= port->stats.ibytes;
2016 stats.obytes -= port->stats.obytes;
2017 stats.imissed -= port->stats.imissed;
2018 stats.oerrors -= port->stats.oerrors;
2019 stats.rx_nombuf -= port->stats.rx_nombuf;
2021 total_recv += stats.ipackets;
2022 total_xmit += stats.opackets;
2023 total_rx_dropped += stats.imissed;
2024 total_tx_dropped += ports_stats[pt_id].tx_dropped;
2025 total_tx_dropped += stats.oerrors;
2026 total_rx_nombuf += stats.rx_nombuf;
2028 printf("\n %s Forward statistics for port %-2d %s\n",
2029 fwd_stats_border, pt_id, fwd_stats_border);
2031 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
2032 "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
2033 stats.ipackets + stats.imissed);
2035 if (cur_fwd_eng == &csum_fwd_engine) {
2036 printf(" Bad-ipcsum: %-14"PRIu64
2037 " Bad-l4csum: %-14"PRIu64
2038 "Bad-outer-l4csum: %-14"PRIu64"\n",
2039 ports_stats[pt_id].rx_bad_ip_csum,
2040 ports_stats[pt_id].rx_bad_l4_csum,
2041 ports_stats[pt_id].rx_bad_outer_l4_csum);
2042 printf(" Bad-outer-ipcsum: %-14"PRIu64"\n",
2043 ports_stats[pt_id].rx_bad_outer_ip_csum);
2045 if (stats.ierrors + stats.rx_nombuf > 0) {
2046 printf(" RX-error: %-"PRIu64"\n", stats.ierrors);
2047 printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
2050 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
2051 "TX-total: %-"PRIu64"\n",
2052 stats.opackets, ports_stats[pt_id].tx_dropped,
2053 stats.opackets + ports_stats[pt_id].tx_dropped);
2055 if (record_burst_stats) {
2056 if (ports_stats[pt_id].rx_stream)
2057 pkt_burst_stats_display("RX",
2058 &ports_stats[pt_id].rx_stream->rx_burst_stats);
2059 if (ports_stats[pt_id].tx_stream)
2060 pkt_burst_stats_display("TX",
2061 &ports_stats[pt_id].tx_stream->tx_burst_stats);
2064 printf(" %s--------------------------------%s\n",
2065 fwd_stats_border, fwd_stats_border);
2068 printf("\n %s Accumulated forward statistics for all ports"
2070 acc_stats_border, acc_stats_border);
2071 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
2073 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
2075 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
2076 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
2077 if (total_rx_nombuf > 0)
2078 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
2079 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
2081 acc_stats_border, acc_stats_border);
2082 if (record_core_cycles) {
2083 #define CYC_PER_MHZ 1E6
2084 if (total_recv > 0 || total_xmit > 0) {
2085 uint64_t total_pkts = 0;
2086 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
2087 strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
2088 total_pkts = total_xmit;
2090 total_pkts = total_recv;
2092 printf("\n CPU cycles/packet=%.2F (total cycles="
2093 "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
2095 (double) fwd_cycles / total_pkts,
2096 fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
2097 (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
2103 fwd_stats_reset(void)
2109 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2110 pt_id = fwd_ports_ids[i];
2111 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2113 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2114 struct fwd_stream *fs = fwd_streams[sm_id];
2118 fs->fwd_dropped = 0;
2119 fs->rx_bad_ip_csum = 0;
2120 fs->rx_bad_l4_csum = 0;
2121 fs->rx_bad_outer_l4_csum = 0;
2122 fs->rx_bad_outer_ip_csum = 0;
2124 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
2125 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2126 fs->core_cycles = 0;
2131 flush_fwd_rx_queues(void)
2133 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2140 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2141 uint64_t timer_period;
2143 if (num_procs > 1) {
2144 printf("multi-process not support for flushing fwd Rx queues, skip the below lines and return.\n");
2148 /* convert to number of cycles */
2149 timer_period = rte_get_timer_hz(); /* 1 second timeout */
2151 for (j = 0; j < 2; j++) {
2152 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2153 for (rxq = 0; rxq < nb_rxq; rxq++) {
2154 port_id = fwd_ports_ids[rxp];
2156 * testpmd can get stuck in the do/while loop below
2157 * if rte_eth_rx_burst() always returns nonzero
2158 * packets, so a timer is added to exit the loop
2159 * after the 1-second timeout expires.
2161 prev_tsc = rte_rdtsc();
2163 nb_rx = rte_eth_rx_burst(port_id, rxq,
2164 pkts_burst, MAX_PKT_BURST);
2165 for (i = 0; i < nb_rx; i++)
2166 rte_pktmbuf_free(pkts_burst[i]);
2168 cur_tsc = rte_rdtsc();
2169 diff_tsc = cur_tsc - prev_tsc;
2170 timer_tsc += diff_tsc;
2171 } while ((nb_rx > 0) &&
2172 (timer_tsc < timer_period));
2176 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2181 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2183 struct fwd_stream **fsm;
2186 #ifdef RTE_LIB_BITRATESTATS
2187 uint64_t tics_per_1sec;
2188 uint64_t tics_datum;
2189 uint64_t tics_current;
2190 uint16_t i, cnt_ports;
2192 cnt_ports = nb_ports;
2193 tics_datum = rte_rdtsc();
2194 tics_per_1sec = rte_get_timer_hz();
2196 fsm = &fwd_streams[fc->stream_idx];
2197 nb_fs = fc->stream_nb;
2199 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2200 (*pkt_fwd)(fsm[sm_id]);
2201 #ifdef RTE_LIB_BITRATESTATS
2202 if (bitrate_enabled != 0 &&
2203 bitrate_lcore_id == rte_lcore_id()) {
2204 tics_current = rte_rdtsc();
2205 if (tics_current - tics_datum >= tics_per_1sec) {
2206 /* Periodic bitrate calculation */
2207 for (i = 0; i < cnt_ports; i++)
2208 rte_stats_bitrate_calc(bitrate_data,
2210 tics_datum = tics_current;
2214 #ifdef RTE_LIB_LATENCYSTATS
2215 if (latencystats_enabled != 0 &&
2216 latencystats_lcore_id == rte_lcore_id())
2217 rte_latencystats_update();
2220 } while (! fc->stopped);
2224 start_pkt_forward_on_core(void *fwd_arg)
2226 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2227 cur_fwd_config.fwd_eng->packet_fwd);
2232 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2233 * Used to start communication flows in network loopback test configurations.
2236 run_one_txonly_burst_on_core(void *fwd_arg)
2238 struct fwd_lcore *fwd_lc;
2239 struct fwd_lcore tmp_lcore;
2241 fwd_lc = (struct fwd_lcore *) fwd_arg;
2242 tmp_lcore = *fwd_lc;
2243 tmp_lcore.stopped = 1;
2244 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2249 * Launch packet forwarding:
2250 * - set up the per-port forwarding context;
2251 * - launch logical cores with their forwarding configuration.
2254 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2260 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2261 lc_id = fwd_lcores_cpuids[i];
2262 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2263 fwd_lcores[i]->stopped = 0;
2264 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2265 fwd_lcores[i], lc_id);
2268 "launch lcore %u failed - diag=%d\n",
2275 * Launch packet forwarding configuration.
2278 start_packet_forwarding(int with_tx_first)
2280 port_fwd_begin_t port_fwd_begin;
2281 port_fwd_end_t port_fwd_end;
2284 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2285 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
2287 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2288 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
2290 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2291 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2292 (!nb_rxq || !nb_txq))
2293 rte_exit(EXIT_FAILURE,
2294 "Either rxq or txq are 0, cannot use %s fwd mode\n",
2295 cur_fwd_eng->fwd_mode_name);
2297 if (all_ports_started() == 0) {
2298 fprintf(stderr, "Not all ports were started\n");
2301 if (test_done == 0) {
2302 fprintf(stderr, "Packet forwarding already started\n");
2308 pkt_fwd_config_display(&cur_fwd_config);
2309 if (!pkt_fwd_shared_rxq_check())
2312 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2313 if (port_fwd_begin != NULL) {
2314 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2315 if (port_fwd_begin(fwd_ports_ids[i])) {
2317 "Packet forwarding is not ready\n");
2323 if (with_tx_first) {
2324 port_fwd_begin = tx_only_engine.port_fwd_begin;
2325 if (port_fwd_begin != NULL) {
2326 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2327 if (port_fwd_begin(fwd_ports_ids[i])) {
2329 "Packet forwarding is not ready\n");
2339 flush_fwd_rx_queues();
2341 rxtx_config_display();
2344 if (with_tx_first) {
2345 while (with_tx_first--) {
2346 launch_packet_forwarding(
2347 run_one_txonly_burst_on_core);
2348 rte_eal_mp_wait_lcore();
2350 port_fwd_end = tx_only_engine.port_fwd_end;
2351 if (port_fwd_end != NULL) {
2352 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2353 (*port_fwd_end)(fwd_ports_ids[i]);
2356 launch_packet_forwarding(start_pkt_forward_on_core);
2360 stop_packet_forwarding(void)
2362 port_fwd_end_t port_fwd_end;
2368 fprintf(stderr, "Packet forwarding not started\n");
2371 printf("Telling cores to stop...");
2372 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2373 fwd_lcores[lc_id]->stopped = 1;
2374 printf("\nWaiting for lcores to finish...\n");
2375 rte_eal_mp_wait_lcore();
2376 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2377 if (port_fwd_end != NULL) {
2378 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2379 pt_id = fwd_ports_ids[i];
2380 (*port_fwd_end)(pt_id);
2384 fwd_stats_display();
2386 printf("\nDone.\n");
2391 dev_set_link_up(portid_t pid)
2393 if (rte_eth_dev_set_link_up(pid) < 0)
2394 fprintf(stderr, "\nFailed to set link up.\n");
2398 dev_set_link_down(portid_t pid)
2400 if (rte_eth_dev_set_link_down(pid) < 0)
2401 fprintf(stderr, "\nFailed to set link down.\n");
2405 all_ports_started(void)
2408 struct rte_port *port;
2410 RTE_ETH_FOREACH_DEV(pi) {
2412 /* Check if there is a port which is not started */
2413 if ((port->port_status != RTE_PORT_STARTED) &&
2414 (port->slave_flag == 0))
2418 /* All ports are started. */
2423 port_is_stopped(portid_t port_id)
2425 struct rte_port *port = &ports[port_id];
2427 if ((port->port_status != RTE_PORT_STOPPED) &&
2428 (port->slave_flag == 0))
2434 all_ports_stopped(void)
2438 RTE_ETH_FOREACH_DEV(pi) {
2439 if (!port_is_stopped(pi))
2447 port_is_started(portid_t port_id)
2449 if (port_id_is_invalid(port_id, ENABLED_WARN))
2452 if (ports[port_id].port_status != RTE_PORT_STARTED)
2458 /* Configure the Rx and Tx hairpin queues for the selected port. */
2460 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2463 struct rte_eth_hairpin_conf hairpin_conf = {
2468 struct rte_port *port = &ports[pi];
2469 uint16_t peer_rx_port = pi;
2470 uint16_t peer_tx_port = pi;
2471 uint32_t manual = 1;
2472 uint32_t tx_exp = hairpin_mode & 0x10;
2474 if (!(hairpin_mode & 0xf)) {
2478 } else if (hairpin_mode & 0x1) {
2479 peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2480 RTE_ETH_DEV_NO_OWNER);
2481 if (peer_tx_port >= RTE_MAX_ETHPORTS)
2482 peer_tx_port = rte_eth_find_next_owned_by(0,
2483 RTE_ETH_DEV_NO_OWNER);
2484 if (p_pi != RTE_MAX_ETHPORTS) {
2485 peer_rx_port = p_pi;
2489 /* Last port will be the peer RX port of the first. */
2490 RTE_ETH_FOREACH_DEV(next_pi)
2491 peer_rx_port = next_pi;
2494 } else if (hairpin_mode & 0x2) {
2496 peer_rx_port = p_pi;
2498 peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2499 RTE_ETH_DEV_NO_OWNER);
2500 if (peer_rx_port >= RTE_MAX_ETHPORTS)
2503 peer_tx_port = peer_rx_port;
2507 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2508 hairpin_conf.peers[0].port = peer_rx_port;
2509 hairpin_conf.peers[0].queue = i + nb_rxq;
2510 hairpin_conf.manual_bind = !!manual;
2511 hairpin_conf.tx_explicit = !!tx_exp;
2512 diag = rte_eth_tx_hairpin_queue_setup
2513 (pi, qi, nb_txd, &hairpin_conf);
2518 /* Failed to set up Tx hairpin queue, return */
2519 if (port->port_status == RTE_PORT_HANDLING)
2520 port->port_status = RTE_PORT_STOPPED;
2523 "Port %d cannot be set back to stopped\n", pi);
2524 fprintf(stderr, "Failed to configure port %d hairpin queues\n",
2526 /* try to reconfigure queues next time */
2527 port->need_reconfig_queues = 1;
2530 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2531 hairpin_conf.peers[0].port = peer_tx_port;
2532 hairpin_conf.peers[0].queue = i + nb_txq;
2533 hairpin_conf.manual_bind = !!manual;
2534 hairpin_conf.tx_explicit = !!tx_exp;
2535 diag = rte_eth_rx_hairpin_queue_setup
2536 (pi, qi, nb_rxd, &hairpin_conf);
2541 /* Failed to set up Rx hairpin queue, return */
2542 if (port->port_status == RTE_PORT_HANDLING)
2543 port->port_status = RTE_PORT_STOPPED;
2546 "Port %d cannot be set back to stopped\n", pi);
2547 fprintf(stderr, "Failed to configure port %d hairpin queues\n",
2549 /* try to reconfigure queues next time */
2550 port->need_reconfig_queues = 1;
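/*
 * Illustrative sketch (not part of testpmd): the minimal per-queue hairpin
 * configuration that setup_hairpin_queues() builds above. The helper name is
 * hypothetical; with manual_bind and tx_explicit left at 0, the queue is
 * bound automatically at device start and Tx flows are implicit.
 */
static int
example_hairpin_rx_queue(portid_t pid, uint16_t qid,
			 uint16_t peer_port, uint16_t peer_queue)
{
	struct rte_eth_hairpin_conf conf = {
		.peer_count = 1,
	};

	conf.peers[0].port = peer_port;
	conf.peers[0].queue = peer_queue;
	return rte_eth_rx_hairpin_queue_setup(pid, qid, nb_rxd, &conf);
}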
2556 /* Configure the Rx with optional split. */
2558 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2559 uint16_t nb_rx_desc, unsigned int socket_id,
2560 struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2562 union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2563 unsigned int i, mp_n;
2566 if (rx_pkt_nb_segs <= 1 ||
2567 (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2568 rx_conf->rx_seg = NULL;
2569 rx_conf->rx_nseg = 0;
2570 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2571 nb_rx_desc, socket_id,
2575 for (i = 0; i < rx_pkt_nb_segs; i++) {
2576 struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2577 struct rte_mempool *mpx;
2579 * Use the last valid pool for segments whose index
2580 * exceeds the number of configured pools.
2582 mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2583 mpx = mbuf_pool_find(socket_id, mp_n);
2584 /* Handle zero as mbuf data buffer size. */
2585 rx_seg->length = rx_pkt_seg_lengths[i] ?
2586 rx_pkt_seg_lengths[i] :
2587 mbuf_data_size[mp_n];
2588 rx_seg->offset = i < rx_pkt_nb_offs ?
2589 rx_pkt_seg_offsets[i] : 0;
2590 rx_seg->mp = mpx ? mpx : mp;
2592 rx_conf->rx_nseg = rx_pkt_nb_segs;
2593 rx_conf->rx_seg = rx_useg;
2594 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2595 socket_id, rx_conf, NULL);
2596 rx_conf->rx_seg = NULL;
2597 rx_conf->rx_nseg = 0;
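/*
 * Illustrative sketch (not part of testpmd): what a caller has to provide for
 * the buffer-split branch above to be taken - the BUFFER_SPLIT offload plus a
 * per-segment layout. Names and sizes are hypothetical; the port must also be
 * configured with the same offload for the queue setup to succeed.
 */
static int
example_split_rx_queue(uint16_t port_id, uint16_t qid, unsigned int socket,
		       struct rte_mempool *hdr_mp, struct rte_mempool *pay_mp)
{
	union rte_eth_rxseg segs[2] = {};
	struct rte_eth_rxconf conf = {};

	segs[0].split.mp = hdr_mp;	/* first 128 bytes (headers) */
	segs[0].split.length = 128;
	segs[1].split.mp = pay_mp;	/* rest of the packet */
	segs[1].split.length = 0;	/* 0 = use the pool's buffer size */
	conf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	conf.rx_seg = segs;
	conf.rx_nseg = 2;
	return rte_eth_rx_queue_setup(port_id, qid, 512, socket, &conf, NULL);
}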
2602 alloc_xstats_display_info(portid_t pi)
2604 uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
2605 uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
2606 uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
2608 if (xstats_display_num == 0)
2611 *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
2612 if (*ids_supp == NULL)
2615 *prev_values = calloc(xstats_display_num,
2616 sizeof(**prev_values));
2617 if (*prev_values == NULL)
2618 goto fail_prev_values;
2620 *curr_values = calloc(xstats_display_num,
2621 sizeof(**curr_values));
2622 if (*curr_values == NULL)
2623 goto fail_curr_values;
2625 ports[pi].xstats_info.allocated = true;
2638 free_xstats_display_info(portid_t pi)
2640 if (!ports[pi].xstats_info.allocated)
2642 free(ports[pi].xstats_info.ids_supp);
2643 free(ports[pi].xstats_info.prev_values);
2644 free(ports[pi].xstats_info.curr_values);
2645 ports[pi].xstats_info.allocated = false;
2648 /** Fill helper structures for specified port to show extended statistics. */
2650 fill_xstats_display_info_for_port(portid_t pi)
2652 unsigned int stat, stat_supp;
2653 const char *xstat_name;
2654 struct rte_port *port;
2658 if (xstats_display_num == 0)
2661 if (pi == (portid_t)RTE_PORT_ALL) {
2662 fill_xstats_display_info();
2667 if (port->port_status != RTE_PORT_STARTED)
2670 if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
2671 rte_exit(EXIT_FAILURE,
2672 "Failed to allocate xstats display memory\n");
2674 ids_supp = port->xstats_info.ids_supp;
2675 for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
2676 xstat_name = xstats_display[stat].name;
2677 rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
2678 ids_supp + stat_supp);
2680 fprintf(stderr, "No xstat '%s' on port %u, skipping it\n",
2681 xstat_name, pi);
2687 port->xstats_info.ids_supp_sz = stat_supp;
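/*
 * Illustrative sketch (not part of testpmd): reading the xstats whose IDs
 * were cached by fill_xstats_display_info_for_port() above. The helper name
 * is hypothetical.
 */
static void
example_read_cached_xstats(portid_t pi)
{
	uint64_t *ids = ports[pi].xstats_info.ids_supp;
	uint64_t *values = ports[pi].xstats_info.curr_values;
	unsigned int n = ports[pi].xstats_info.ids_supp_sz;
	int rc;

	rc = rte_eth_xstats_get_by_id(pi, ids, values, n);
	if (rc < 0 || (unsigned int)rc != n)
		fprintf(stderr, "Failed to read xstats for port %u\n", pi);
}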
2690 /** Fill helper structures for all ports to show extended statistics. */
2692 fill_xstats_display_info(void)
2696 if (xstats_display_num == 0)
2699 RTE_ETH_FOREACH_DEV(pi)
2700 fill_xstats_display_info_for_port(pi);
2704 start_port(portid_t pid)
2706 int diag, need_check_link_status = -1;
2708 portid_t p_pi = RTE_MAX_ETHPORTS;
2709 portid_t pl[RTE_MAX_ETHPORTS];
2710 portid_t peer_pl[RTE_MAX_ETHPORTS];
2711 uint16_t cnt_pi = 0;
2712 uint16_t cfg_pi = 0;
2715 struct rte_port *port;
2716 struct rte_eth_hairpin_cap cap;
2718 if (port_id_is_invalid(pid, ENABLED_WARN))
2721 RTE_ETH_FOREACH_DEV(pi) {
2722 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2725 need_check_link_status = 0;
2727 if (port->port_status == RTE_PORT_STOPPED)
2728 port->port_status = RTE_PORT_HANDLING;
2730 fprintf(stderr, "Port %d is not stopped, skipping it\n", pi);
2734 if (port->need_reconfig > 0) {
2735 struct rte_eth_conf dev_conf;
2738 port->need_reconfig = 0;
2740 if (flow_isolate_all) {
2741 int ret = port_flow_isolate(pi, 1);
2744 "Failed to apply isolated mode on port %d\n",
2749 configure_rxtx_dump_callbacks(0);
2750 printf("Configuring Port %d (socket %u)\n", pi,
2752 if (nb_hairpinq > 0 &&
2753 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2755 "Port %d doesn't support hairpin queues\n",
2760 /* configure port */
2761 diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
2762 nb_txq + nb_hairpinq,
2765 if (port->port_status == RTE_PORT_HANDLING)
2766 port->port_status = RTE_PORT_STOPPED;
2769 "Port %d can not be set back to stopped\n",
2771 fprintf(stderr, "Fail to configure port %d\n",
2773 /* try to reconfigure port next time */
2774 port->need_reconfig = 1;
2777 /* get device configuration */
2779 eth_dev_conf_get_print_err(pi, &dev_conf)) {
2781 "port %d can not get device configuration\n",
2785 /* Apply Rx offloads configuration */
2786 if (dev_conf.rxmode.offloads !=
2787 port->dev_conf.rxmode.offloads) {
2788 port->dev_conf.rxmode.offloads |=
2789 dev_conf.rxmode.offloads;
2791 k < port->dev_info.max_rx_queues;
2793 port->rx_conf[k].offloads |=
2794 dev_conf.rxmode.offloads;
2796 /* Apply Tx offloads configuration */
2797 if (dev_conf.txmode.offloads !=
2798 port->dev_conf.txmode.offloads) {
2799 port->dev_conf.txmode.offloads |=
2800 dev_conf.txmode.offloads;
2802 k < port->dev_info.max_tx_queues;
2804 port->tx_conf[k].offloads |=
2805 dev_conf.txmode.offloads;
2808 if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2809 port->need_reconfig_queues = 0;
2810 /* setup tx queues */
2811 for (qi = 0; qi < nb_txq; qi++) {
2812 if ((numa_support) &&
2813 (txring_numa[pi] != NUMA_NO_CONFIG))
2814 diag = rte_eth_tx_queue_setup(pi, qi,
2815 port->nb_tx_desc[qi],
2817 &(port->tx_conf[qi]));
2819 diag = rte_eth_tx_queue_setup(pi, qi,
2820 port->nb_tx_desc[qi],
2822 &(port->tx_conf[qi]));
2827 /* Failed to set up Tx queue, return */
2828 if (port->port_status == RTE_PORT_HANDLING)
2829 port->port_status = RTE_PORT_STOPPED;
2832 "Port %d cannot be set back to stopped\n",
2835 "Failed to configure port %d Tx queues\n",
2837 /* try to reconfigure queues next time */
2838 port->need_reconfig_queues = 1;
2841 for (qi = 0; qi < nb_rxq; qi++) {
2842 /* setup rx queues */
2843 if ((numa_support) &&
2844 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2845 struct rte_mempool * mp =
2847 (rxring_numa[pi], 0);
2850 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2855 diag = rx_queue_setup(pi, qi,
2856 port->nb_rx_desc[qi],
2858 &(port->rx_conf[qi]),
2861 struct rte_mempool *mp =
2863 (port->socket_id, 0);
2866 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2870 diag = rx_queue_setup(pi, qi,
2871 port->nb_rx_desc[qi],
2873 &(port->rx_conf[qi]),
2879 /* Failed to set up Rx queue, return */
2880 if (port->port_status == RTE_PORT_HANDLING)
2881 port->port_status = RTE_PORT_STOPPED;
2884 "Port %d cannot be set back to stopped\n",
2887 "Failed to configure port %d Rx queues\n",
2889 /* try to reconfigure queues next time */
2890 port->need_reconfig_queues = 1;
2893 /* setup hairpin queues */
2894 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
2897 configure_rxtx_dump_callbacks(verbose_level);
2899 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2903 "Port %d: Failed to disable Ptype parsing\n",
2911 diag = eth_dev_start_mp(pi);
2913 fprintf(stderr, "Failed to start port %d: %s\n",
2914 pi, rte_strerror(-diag));
2916 /* Failed to start the port, return */
2917 if (port->port_status == RTE_PORT_HANDLING)
2918 port->port_status = RTE_PORT_STOPPED;
2921 "Port %d cannot be set back to stopped\n",
2926 if (port->port_status == RTE_PORT_HANDLING)
2927 port->port_status = RTE_PORT_STARTED;
2929 fprintf(stderr, "Port %d cannot be set to started\n",
2932 if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
2933 printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
2934 RTE_ETHER_ADDR_BYTES(&port->eth_addr));
2936 /* at least one port was started; need to check link status */
2937 need_check_link_status = 1;
2942 if (need_check_link_status == 1 && !no_link_check)
2943 check_all_ports_link_status(RTE_PORT_ALL);
2944 else if (need_check_link_status == 0)
2945 fprintf(stderr, "Please stop the ports first\n");
2947 if (hairpin_mode & 0xf) {
2951 /* bind all started hairpin ports */
2952 for (i = 0; i < cfg_pi; i++) {
2954 /* bind current Tx to all peer Rx */
2955 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2956 RTE_MAX_ETHPORTS, 1);
2959 for (j = 0; j < peer_pi; j++) {
2960 if (!port_is_started(peer_pl[j]))
2962 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2965 "Error during binding hairpin Tx port %u to %u: %s\n",
2967 rte_strerror(-diag));
2971 /* bind all peer Tx to current Rx */
2972 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2973 RTE_MAX_ETHPORTS, 0);
2976 for (j = 0; j < peer_pi; j++) {
2977 if (!port_is_started(peer_pl[j]))
2979 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
2982 "Error during binding hairpin Tx port %u to %u: %s\n",
2984 rte_strerror(-diag));
2991 fill_xstats_display_info_for_port(pid);
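/*
 * Illustrative sketch (not part of testpmd): the bare-minimum bring-up
 * sequence that start_port() wraps above - configure, one Rx/Tx queue pair,
 * then start. Names and descriptor counts are hypothetical; error handling
 * is reduced to early returns.
 */
static int
example_minimal_port_start(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_conf conf = {};	/* all-defaults configuration */
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL, mp);
	if (ret < 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;
	return rte_eth_dev_start(port_id);
}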
2998 stop_port(portid_t pid)
3001 struct rte_port *port;
3002 int need_check_link_status = 0;
3003 portid_t peer_pl[RTE_MAX_ETHPORTS];
3006 if (port_id_is_invalid(pid, ENABLED_WARN))
3009 printf("Stopping ports...\n");
3011 RTE_ETH_FOREACH_DEV(pi) {
3012 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3015 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3017 "Please remove port %d from forwarding configuration.\n",
3022 if (port_is_bonding_slave(pi)) {
3024 "Please remove port %d from bonded device.\n",
3030 if (port->port_status == RTE_PORT_STARTED)
3031 port->port_status = RTE_PORT_HANDLING;
3035 if (hairpin_mode & 0xf) {
3038 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
3039 /* unbind all peer Tx from current Rx */
3040 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3041 RTE_MAX_ETHPORTS, 0);
3044 for (j = 0; j < peer_pi; j++) {
3045 if (!port_is_started(peer_pl[j]))
3047 rte_eth_hairpin_unbind(peer_pl[j], pi);
3051 if (port->flow_list)
3052 port_flow_flush(pi);
3054 if (eth_dev_stop_mp(pi) != 0)
3055 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3058 if (port->port_status == RTE_PORT_HANDLING)
3059 port->port_status = RTE_PORT_STOPPED;
3061 fprintf(stderr, "Port %d cannot be set to stopped\n",
3063 need_check_link_status = 1;
3065 if (need_check_link_status && !no_link_check)
3066 check_all_ports_link_status(RTE_PORT_ALL);
3072 remove_invalid_ports_in(portid_t *array, portid_t *total)
3075 portid_t new_total = 0;
3077 for (i = 0; i < *total; i++)
3078 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
3079 array[new_total] = array[i];
3086 remove_invalid_ports(void)
3088 remove_invalid_ports_in(ports_ids, &nb_ports);
3089 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
3090 nb_cfg_ports = nb_fwd_ports;
3094 close_port(portid_t pid)
3097 struct rte_port *port;
3099 if (port_id_is_invalid(pid, ENABLED_WARN))
3102 printf("Closing ports...\n");
3104 RTE_ETH_FOREACH_DEV(pi) {
3105 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3108 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3110 "Please remove port %d from forwarding configuration.\n",
3115 if (port_is_bonding_slave(pi)) {
3117 "Please remove port %d from bonded device.\n",
3123 if (port->port_status == RTE_PORT_CLOSED) {
3124 fprintf(stderr, "Port %d is already closed\n", pi);
3128 if (is_proc_primary()) {
3129 port_flow_flush(pi);
3130 port_flex_item_flush(pi);
3131 rte_eth_dev_close(pi);
3134 free_xstats_display_info(pi);
3137 remove_invalid_ports();
3142 reset_port(portid_t pid)
3146 struct rte_port *port;
3148 if (port_id_is_invalid(pid, ENABLED_WARN))
3151 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
3152 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
3154 "Can not reset port(s), please stop port(s) first.\n");
3158 printf("Resetting ports...\n");
3160 RTE_ETH_FOREACH_DEV(pi) {
3161 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3164 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3166 "Please remove port %d from forwarding configuration.\n",
3171 if (port_is_bonding_slave(pi)) {
3173 "Please remove port %d from bonded device.\n",
3178 diag = rte_eth_dev_reset(pi);
3181 port->need_reconfig = 1;
3182 port->need_reconfig_queues = 1;
3184 fprintf(stderr, "Failed to reset port %d. diag=%d\n",
3193 attach_port(char *identifier)
3196 struct rte_dev_iterator iterator;
3198 printf("Attaching a new port...\n");
3200 if (identifier == NULL) {
3201 fprintf(stderr, "Invalid parameters are specified\n");
3205 if (rte_dev_probe(identifier) < 0) {
3206 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3210 /* first attach mode: event */
3211 if (setup_on_probe_event) {
3212 /* new ports are detected on RTE_ETH_EVENT_NEW event */
3213 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
3214 if (ports[pi].port_status == RTE_PORT_HANDLING &&
3215 ports[pi].need_setup != 0)
3216 setup_attached_port(pi);
3220 /* second attach mode: iterator */
3221 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
3222 /* setup ports matching the devargs used for probing */
3223 if (port_is_forwarding(pi))
3224 continue; /* port was already attached before */
3225 setup_attached_port(pi);
3230 setup_attached_port(portid_t pi)
3232 unsigned int socket_id;
3235 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
3236 /* if socket_id is invalid, set to the first available socket. */
3237 if (check_socket_id(socket_id) < 0)
3238 socket_id = socket_ids[0];
3239 reconfig(pi, socket_id);
3240 ret = rte_eth_promiscuous_enable(pi);
3243 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
3244 pi, rte_strerror(-ret));
3246 ports_ids[nb_ports++] = pi;
3247 fwd_ports_ids[nb_fwd_ports++] = pi;
3248 nb_cfg_ports = nb_fwd_ports;
3249 ports[pi].need_setup = 0;
3250 ports[pi].port_status = RTE_PORT_STOPPED;
3252 printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
3257 detach_device(struct rte_device *dev)
3262 fprintf(stderr, "Device already removed\n");
3266 printf("Removing a device...\n");
3268 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3269 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3270 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3271 fprintf(stderr, "Port %u not stopped\n",
3275 port_flow_flush(sibling);
3279 if (rte_dev_remove(dev) < 0) {
3280 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3283 remove_invalid_ports();
3285 printf("Device is detached\n");
3286 printf("Total number of ports is now %d\n", nb_ports);
3292 detach_port_device(portid_t port_id)
3295 struct rte_eth_dev_info dev_info;
3297 if (port_id_is_invalid(port_id, ENABLED_WARN))
3300 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3301 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3302 fprintf(stderr, "Port not stopped\n");
3305 fprintf(stderr, "Port was not closed\n");
3308 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3311 "Failed to get device info for port %d, not detaching\n",
3315 detach_device(dev_info.device);
3319 detach_devargs(char *identifier)
3321 struct rte_dev_iterator iterator;
3322 struct rte_devargs da;
3325 printf("Removing a device...\n");
3327 memset(&da, 0, sizeof(da));
3328 if (rte_devargs_parsef(&da, "%s", identifier)) {
3329 fprintf(stderr, "cannot parse identifier\n");
3333 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
3334 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3335 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3336 fprintf(stderr, "Port %u not stopped\n",
3338 rte_eth_iterator_cleanup(&iterator);
3339 rte_devargs_reset(&da);
3342 port_flow_flush(port_id);
3346 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3347 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3348 da.name, da.bus->name);
3349 rte_devargs_reset(&da);
3353 remove_invalid_ports();
3355 printf("Device %s is detached\n", identifier);
3356 printf("Total number of ports is now %d\n", nb_ports);
3358 rte_devargs_reset(&da);
3369 stop_packet_forwarding();
3371 #ifndef RTE_EXEC_ENV_WINDOWS
3372 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3374 if (mp_alloc_type == MP_ALLOC_ANON)
3375 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3380 if (ports != NULL) {
3382 RTE_ETH_FOREACH_DEV(pt_id) {
3383 printf("\nStopping port %d...\n", pt_id);
3387 RTE_ETH_FOREACH_DEV(pt_id) {
3388 printf("\nShutting down port %d...\n", pt_id);
3395 ret = rte_dev_event_monitor_stop();
3398 "fail to stop device event monitor.");
3402 ret = rte_dev_event_callback_unregister(NULL,
3403 dev_event_callback, NULL);
3406 "fail to unregister device event callback.\n");
3410 ret = rte_dev_hotplug_handle_disable();
3413 "fail to disable hotplug handling.\n");
3417 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3419 mempool_free_mp(mempools[i]);
3421 free(xstats_display);
3423 printf("\nBye...\n");
3426 typedef void (*cmd_func_t)(void);
3427 struct pmd_test_command {
3428 const char *cmd_name;
3429 cmd_func_t cmd_func;
3432 /* Check the link status of all ports for up to 9 s, then print the final status */
3434 check_all_ports_link_status(uint32_t port_mask)
3436 #define CHECK_INTERVAL 100 /* 100ms */
3437 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3439 uint8_t count, all_ports_up, print_flag = 0;
3440 struct rte_eth_link link;
3442 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3444 printf("Checking link statuses...\n");
3446 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3448 RTE_ETH_FOREACH_DEV(portid) {
3449 if ((port_mask & (1 << portid)) == 0)
3451 memset(&link, 0, sizeof(link));
3452 ret = rte_eth_link_get_nowait(portid, &link);
3455 if (print_flag == 1)
3457 "Port %u link get failed: %s\n",
3458 portid, rte_strerror(-ret));
3461 /* print link status if flag set */
3462 if (print_flag == 1) {
3463 rte_eth_link_to_str(link_status,
3464 sizeof(link_status), &link);
3465 printf("Port %d %s\n", portid, link_status);
3468 /* clear all_ports_up flag if any link down */
3469 if (link.link_status == RTE_ETH_LINK_DOWN) {
3474 /* after finally printing all link status, get out */
3475 if (print_flag == 1)
3478 if (all_ports_up == 0) {
3480 rte_delay_ms(CHECK_INTERVAL);
3483 /* set the print_flag if all ports up or timeout */
3484 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
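/*
 * Illustrative sketch (not part of testpmd): the same poll-and-retry pattern
 * as check_all_ports_link_status() above, reduced to a single port. The
 * helper name is hypothetical.
 */
static int
example_wait_link_up(uint16_t port_id)
{
	struct rte_eth_link link;
	uint8_t count;

	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		memset(&link, 0, sizeof(link));
		if (rte_eth_link_get_nowait(port_id, &link) < 0)
			return -1;
		if (link.link_status == RTE_ETH_LINK_UP)
			return 0;
		rte_delay_ms(CHECK_INTERVAL);
	}
	return -1; /* still down after ~9 s */
}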
3494 rmv_port_callback(void *arg)
3496 int need_to_start = 0;
3497 int org_no_link_check = no_link_check;
3498 portid_t port_id = (intptr_t)arg;
3499 struct rte_eth_dev_info dev_info;
3502 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3504 if (!test_done && port_is_forwarding(port_id)) {
3506 stop_packet_forwarding();
3510 no_link_check = org_no_link_check;
3512 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3515 "Failed to get device info for port %d, not detaching\n",
3518 struct rte_device *device = dev_info.device;
3519 close_port(port_id);
3520 detach_device(device); /* might be already removed or have more ports */
3523 start_packet_forwarding(0);
3526 /* This function is used by the interrupt thread */
3528 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3531 RTE_SET_USED(param);
3532 RTE_SET_USED(ret_param);
3534 if (type >= RTE_ETH_EVENT_MAX) {
3536 "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3537 port_id, __func__, type);
3539 } else if (event_print_mask & (UINT32_C(1) << type)) {
3540 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3541 eth_event_desc[type]);
3546 case RTE_ETH_EVENT_NEW:
3547 ports[port_id].need_setup = 1;
3548 ports[port_id].port_status = RTE_PORT_HANDLING;
3550 case RTE_ETH_EVENT_INTR_RMV:
3551 if (port_id_is_invalid(port_id, DISABLED_WARN))
3553 if (rte_eal_alarm_set(100000,
3554 rmv_port_callback, (void *)(intptr_t)port_id))
3556 "Could not set up deferred device removal\n");
3558 case RTE_ETH_EVENT_DESTROY:
3559 ports[port_id].port_status = RTE_PORT_CLOSED;
3560 printf("Port %u is closed\n", port_id);
3569 register_eth_event_callback(void)
3572 enum rte_eth_event_type event;
3574 for (event = RTE_ETH_EVENT_UNKNOWN;
3575 event < RTE_ETH_EVENT_MAX; event++) {
3576 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3581 TESTPMD_LOG(ERR, "Failed to register callback for "
3582 "%s event\n", eth_event_desc[event]);
3590 /* This function is used by the interrupt thread */
3592 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3593 __rte_unused void *arg)
3598 if (type >= RTE_DEV_EVENT_MAX) {
3599 fprintf(stderr, "%s called upon invalid event %d\n",
3605 case RTE_DEV_EVENT_REMOVE:
3606 RTE_LOG(DEBUG, EAL, "The device %s has been removed!\n",
3608 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3610 RTE_LOG(ERR, EAL, "cannot get port for device %s!\n",
3615 * Because the user's callback is invoked from the EAL interrupt
3616 * callback, the interrupt callback must return before it can be
3617 * unregistered when detaching the device. So finish the callback
3618 * quickly and detach the device via a deferred removal instead.
3619 * This is a workaround; once device detaching is moved into the
3620 * EAL in the future, the deferred removal could
3623 if (rte_eal_alarm_set(100000,
3624 rmv_port_callback, (void *)(intptr_t)port_id))
3626 "Could not set up deferred device removal\n");
3628 case RTE_DEV_EVENT_ADD:
3629 RTE_LOG(ERR, EAL, "The device %s has been added!\n",
3631 /* TODO: After the kernel driver binding is finished,
3632 * begin to attach the port.
3641 rxtx_port_config(portid_t pid)
3645 struct rte_port *port = &ports[pid];
3647 for (qid = 0; qid < nb_rxq; qid++) {
3648 offloads = port->rx_conf[qid].offloads;
3649 port->rx_conf[qid] = port->dev_info.default_rxconf;
3651 if (rxq_share > 0 &&
3652 (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
3653 /* Non-zero share group to enable RxQ share. */
3654 port->rx_conf[qid].share_group = pid / rxq_share + 1;
3655 port->rx_conf[qid].share_qid = qid; /* Equal mapping. */
3659 port->rx_conf[qid].offloads = offloads;
3661 /* Check if any Rx parameters have been passed */
3662 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3663 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3665 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3666 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3668 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3669 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3671 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3672 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3674 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3675 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3677 port->nb_rx_desc[qid] = nb_rxd;
3680 for (qid = 0; qid < nb_txq; qid++) {
3681 offloads = port->tx_conf[qid].offloads;
3682 port->tx_conf[qid] = port->dev_info.default_txconf;
3684 port->tx_conf[qid].offloads = offloads;
3686 /* Check if any Tx parameters have been passed */
3687 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3688 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3690 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3691 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3693 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3694 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3696 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3697 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3699 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3700 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3702 port->nb_tx_desc[qid] = nb_txd;
3707 * Helper function to set MTU from frame size
3709 * port->dev_info should be set before calling this function.
3711 * return 0 on success, negative on error
3714 update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
3716 struct rte_port *port = &ports[portid];
3717 uint32_t eth_overhead;
3718 uint16_t mtu, new_mtu;
3720 eth_overhead = get_eth_overhead(&port->dev_info);
3722 if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
3723 printf("Failed to get MTU for port %u\n", portid);
3727 new_mtu = max_rx_pktlen - eth_overhead;
3732 if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
3734 "Failed to set MTU to %u for port %u\n",
3739 port->dev_conf.rxmode.mtu = new_mtu;
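/*
 * Illustrative sketch (not part of testpmd): the frame-size/MTU arithmetic
 * used above. With the standard 18 bytes of Ethernet overhead (14-byte
 * header + 4-byte CRC), a 1518-byte maximum frame yields the classic
 * 1500-byte MTU.
 */
static uint16_t
example_mtu_from_frame_size(uint32_t max_rx_pktlen)
{
	uint32_t overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; /* 18 */

	return (uint16_t)(max_rx_pktlen - overhead); /* 1518 -> 1500 */
}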
3745 init_port_config(void)
3748 struct rte_port *port;
3751 RTE_ETH_FOREACH_DEV(pid) {
3753 port->dev_conf.fdir_conf = fdir_conf;
3755 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3760 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3761 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3762 rss_hf & port->dev_info.flow_type_rss_offloads;
3764 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3765 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3768 if (port->dcb_flag == 0) {
3769 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
3770 port->dev_conf.rxmode.mq_mode =
3771 (enum rte_eth_rx_mq_mode)
3772 (rx_mq_mode & RTE_ETH_MQ_RX_RSS);
3774 port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
3775 port->dev_conf.rxmode.offloads &=
3776 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3779 i < port->dev_info.nb_rx_queues;
3781 port->rx_conf[i].offloads &=
3782 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3786 rxtx_port_config(pid);
3788 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3792 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3793 rte_pmd_ixgbe_bypass_init(pid);
3796 if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
3797 port->dev_conf.intr_conf.lsc = 1;
3798 if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3799 port->dev_conf.intr_conf.rmv = 1;
3803 void set_port_slave_flag(portid_t slave_pid)
3805 struct rte_port *port;
3807 port = &ports[slave_pid];
3808 port->slave_flag = 1;
3811 void clear_port_slave_flag(portid_t slave_pid)
3813 struct rte_port *port;
3815 port = &ports[slave_pid];
3816 port->slave_flag = 0;
3819 uint8_t port_is_bonding_slave(portid_t slave_pid)
3821 struct rte_port *port;
3822 struct rte_eth_dev_info dev_info;
3825 port = &ports[slave_pid];
3826 ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
3829 "Failed to get device info for port id %d,"
3830 "cannot determine if the port is a bonded slave",
3834 if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3839 const uint16_t vlan_tags[] = {
3840 0, 1, 2, 3, 4, 5, 6, 7,
3841 8, 9, 10, 11, 12, 13, 14, 15,
3842 16, 17, 18, 19, 20, 21, 22, 23,
3843 24, 25, 26, 27, 28, 29, 30, 31
3847 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3848 enum dcb_mode_enable dcb_mode,
3849 enum rte_eth_nb_tcs num_tcs,
3854 struct rte_eth_rss_conf rss_conf;
3857 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3858 * given above, and the number of traffic classes available for use.
3860 if (dcb_mode == DCB_VT_ENABLED) {
3861 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3862 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3863 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3864 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3866 /* VMDQ+DCB RX and TX configurations */
3867 vmdq_rx_conf->enable_default_pool = 0;
3868 vmdq_rx_conf->default_pool = 0;
3869 vmdq_rx_conf->nb_queue_pools =
3870 (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
3871 vmdq_tx_conf->nb_queue_pools =
3872 (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
3874 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3875 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3876 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3877 vmdq_rx_conf->pool_map[i].pools =
3878 1 << (i % vmdq_rx_conf->nb_queue_pools);
3880 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3881 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3882 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3885 /* set DCB mode of RX and TX of multiple queues */
3886 eth_conf->rxmode.mq_mode =
3887 (enum rte_eth_rx_mq_mode)
3888 (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
3889 eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
3891 struct rte_eth_dcb_rx_conf *rx_conf =
3892 &eth_conf->rx_adv_conf.dcb_rx_conf;
3893 struct rte_eth_dcb_tx_conf *tx_conf =
3894 &eth_conf->tx_adv_conf.dcb_tx_conf;
3896 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3898 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3902 rx_conf->nb_tcs = num_tcs;
3903 tx_conf->nb_tcs = num_tcs;
3905 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3906 rx_conf->dcb_tc[i] = i % num_tcs;
3907 tx_conf->dcb_tc[i] = i % num_tcs;
3910 eth_conf->rxmode.mq_mode =
3911 (enum rte_eth_rx_mq_mode)
3912 (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
3913 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3914 eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
3918 eth_conf->dcb_capability_en =
3919 RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
3921 eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
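/*
 * Illustrative sketch (not part of testpmd): the round-robin priority-to-TC
 * mapping used by get_eth_dcb_conf() above. With num_tcs == 4, user
 * priorities 0..7 map to traffic classes 0,1,2,3,0,1,2,3.
 */
static void
example_fill_dcb_tc_map(uint8_t *dcb_tc, int num_tcs)
{
	int i;

	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
		dcb_tc[i] = (uint8_t)(i % num_tcs);
}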
3927 init_port_dcb_config(portid_t pid,
3928 enum dcb_mode_enable dcb_mode,
3929 enum rte_eth_nb_tcs num_tcs,
3932 struct rte_eth_conf port_conf;
3933 struct rte_port *rte_port;
3937 if (num_procs > 1) {
3938 printf("The multi-process feature doesn't support DCB.\n");
3941 rte_port = &ports[pid];
3943 /* retain the original device configuration. */
3944 memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
3946 /* set configuration of DCB in VT mode and DCB in non-VT mode */
3947 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3950 port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3951 /* remove RSS hash offload for DCB in VT mode */
3952 if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
3953 port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3954 for (i = 0; i < nb_rxq; i++)
3955 rte_port->rx_conf[i].offloads &=
3956 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3959 /* re-configure the device. */
3960 retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
3964 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3968 /* If dev_info.vmdq_pool_base is greater than 0,
3969 * the queue IDs of the VMDq pools start after the PF queues.
3971 if (dcb_mode == DCB_VT_ENABLED &&
3972 rte_port->dev_info.vmdq_pool_base > 0) {
3974 "VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
3979 /* Assume the ports in testpmd have the same DCB capability
3980 * and the same number of rxq and txq in DCB mode
3982 if (dcb_mode == DCB_VT_ENABLED) {
3983 if (rte_port->dev_info.max_vfs > 0) {
3984 nb_rxq = rte_port->dev_info.nb_rx_queues;
3985 nb_txq = rte_port->dev_info.nb_tx_queues;
3987 nb_rxq = rte_port->dev_info.max_rx_queues;
3988 nb_txq = rte_port->dev_info.max_tx_queues;
3991 /* if VT is disabled, use all PF queues */
3992 if (rte_port->dev_info.vmdq_pool_base == 0) {
3993 nb_rxq = rte_port->dev_info.max_rx_queues;
3994 nb_txq = rte_port->dev_info.max_tx_queues;
3996 nb_rxq = (queueid_t)num_tcs;
3997 nb_txq = (queueid_t)num_tcs;
4001 rx_free_thresh = 64;
4003 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4005 rxtx_port_config(pid);
4007 rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4008 for (i = 0; i < RTE_DIM(vlan_tags); i++)
4009 rx_vft_set(pid, vlan_tags[i], 1);
4011 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4015 rte_port->dcb_flag = 1;
4017 /* Enter DCB configuration status */
4028 /* Configuration of Ethernet ports. */
4029 ports = rte_zmalloc("testpmd: ports",
4030 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4031 RTE_CACHE_LINE_SIZE);
4032 if (ports == NULL) {
4033 rte_exit(EXIT_FAILURE,
4034 "rte_zmalloc(%d struct rte_port) failed\n",
4037 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4038 ports[i].xstats_info.allocated = false;
4039 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4040 LIST_INIT(&ports[i].flow_tunnel_list);
4041 /* Initialize ports NUMA structures */
4042 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4043 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4044 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4058 const char clr[] = { 27, '[', '2', 'J', '\0' };
4059 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4061 /* Clear screen and move to top left */
4062 printf("%s%s", clr, top_left);
4064 printf("\nPort statistics ====================================");
4065 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4066 nic_stats_display(fwd_ports_ids[i]);
4072 signal_handler(int signum)
4074 if (signum == SIGINT || signum == SIGTERM) {
4075 fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4077 #ifdef RTE_LIB_PDUMP
4078 /* uninitialize packet capture framework */
4081 #ifdef RTE_LIB_LATENCYSTATS
4082 if (latencystats_enabled != 0)
4083 rte_latencystats_uninit();
4086 /* Set flag to indicate forced termination. */
4088 /* exit with the expected status */
4089 #ifndef RTE_EXEC_ENV_WINDOWS
4090 signal(signum, SIG_DFL);
4091 kill(getpid(), signum);
4097 main(int argc, char** argv)
4104 signal(SIGINT, signal_handler);
4105 signal(SIGTERM, signal_handler);
4107 testpmd_logtype = rte_log_register("testpmd");
4108 if (testpmd_logtype < 0)
4109 rte_exit(EXIT_FAILURE, "Cannot register log type");
4110 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4112 diag = rte_eal_init(argc, argv);
4114 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
4115 rte_strerror(rte_errno));
4117 ret = register_eth_event_callback();
4119 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
4121 #ifdef RTE_LIB_PDUMP
4122 /* initialize packet capture framework */
4127 RTE_ETH_FOREACH_DEV(port_id) {
4128 ports_ids[count] = port_id;
4131 nb_ports = (portid_t) count;
4133 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
4135 /* allocate port structures, and init them */
4138 set_def_fwd_config();
4140 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
4141 "Check the core mask argument\n");
4143 /* Bitrate/latency stats disabled by default */
4144 #ifdef RTE_LIB_BITRATESTATS
4145 bitrate_enabled = 0;
4147 #ifdef RTE_LIB_LATENCYSTATS
4148 latencystats_enabled = 0;
4151 /* on FreeBSD, mlockall() is disabled by default */
4152 #ifdef RTE_EXEC_ENV_FREEBSD
4161 launch_args_parse(argc, argv);
4163 #ifndef RTE_EXEC_ENV_WINDOWS
4164 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4165 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
4170 if (tx_first && interactive)
4171 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
4172 "interactive mode.\n");
4174 if (tx_first && lsc_interrupt) {
4176 "Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
4180 if (!nb_rxq && !nb_txq)
4182 "Warning: Either rx or tx queues should be non-zero\n");
4184 if (nb_rxq > 1 && nb_rxq > nb_txq)
4186 "Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
4192 ret = rte_dev_hotplug_handle_enable();
4195 "fail to enable hotplug handling.");
4199 ret = rte_dev_event_monitor_start();
4202 "fail to start device event monitoring.");
4206 ret = rte_dev_event_callback_register(NULL,
4207 dev_event_callback, NULL);
4210 "fail to register device event callback\n");
4215 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
4216 rte_exit(EXIT_FAILURE, "Start ports failed\n");
4218 /* set all ports to promiscuous mode by default */
4219 RTE_ETH_FOREACH_DEV(port_id) {
4220 ret = rte_eth_promiscuous_enable(port_id);
4223 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
4224 port_id, rte_strerror(-ret));
4227 #ifdef RTE_LIB_METRICS
4228 /* Init metrics library */
4229 rte_metrics_init(rte_socket_id());
4232 #ifdef RTE_LIB_LATENCYSTATS
4233 if (latencystats_enabled != 0) {
4234 int ret = rte_latencystats_init(1, NULL);
4237 "Warning: latencystats init() returned error %d\n",
4239 fprintf(stderr, "Latencystats running on lcore %d\n",
4240 latencystats_lcore_id);
4244 /* Setup bitrate stats */
4245 #ifdef RTE_LIB_BITRATESTATS
4246 if (bitrate_enabled != 0) {
4247 bitrate_data = rte_stats_bitrate_create();
4248 if (bitrate_data == NULL)
4249 rte_exit(EXIT_FAILURE,
4250 "Could not allocate bitrate data.\n");
4251 rte_stats_bitrate_reg(bitrate_data);
4254 #ifdef RTE_LIB_CMDLINE
4255 if (strlen(cmdline_filename) != 0)
4256 cmdline_read_from_file(cmdline_filename);
4258 if (interactive == 1) {
4260 printf("Start automatic packet forwarding\n");
4261 start_packet_forwarding(0);
4273 printf("No command-line core given, starting packet forwarding\n");
4274 start_packet_forwarding(tx_first);
4275 if (stats_period != 0) {
4276 uint64_t prev_time = 0, cur_time, diff_time = 0;
4277 uint64_t timer_period;
4279 /* Convert to number of cycles */
4280 timer_period = stats_period * rte_get_timer_hz();
4282 while (f_quit == 0) {
4283 cur_time = rte_get_timer_cycles();
4284 diff_time += cur_time - prev_time;
4286 if (diff_time >= timer_period) {
4288 /* Reset the timer */
4291 /* Sleep to avoid unnecessary checks */
4292 prev_time = cur_time;
4293 rte_delay_us_sleep(US_PER_S);
4297 printf("Press enter to exit\n");
4298 rc = read(0, &c, 1);
4304 ret = rte_eal_cleanup();
4306 rte_exit(EXIT_FAILURE,
4307 "EAL cleanup failed: %s\n", strerror(-ret));
4309 return EXIT_SUCCESS;
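/*
 * Illustrative sketch (not part of testpmd): the cycle-based periodic timer
 * used in the stats loop above. rte_get_timer_hz() converts the period from
 * seconds to TSC cycles, and elapsed cycles are accumulated until the period
 * expires. The helper name and callback are hypothetical; the loop never
 * returns.
 */
static void
example_periodic(uint64_t period_s, void (*cb)(void))
{
	uint64_t timer_period = period_s * rte_get_timer_hz();
	uint64_t prev_time = rte_get_timer_cycles();
	uint64_t diff_time = 0;

	for (;;) {
		uint64_t cur_time = rte_get_timer_cycles();

		diff_time += cur_time - prev_time;
		prev_time = cur_time;
		if (diff_time >= timer_period) {
			cb();
			diff_time = 0; /* reset the timer */
		}
		rte_delay_us_sleep(US_PER_S); /* sleep 1 s between checks */
	}
}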