/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_pmd_ixgbe.h>
#include <rte_pdump.h>
#include <rte_metrics.h>
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* Use the main core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not set.
 */
uint8_t socket_num = UMA_NO_CONFIG;
/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store the specified sockets on which the memory pool to be used by
 * each port is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX ring to be used by
 * each port is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX ring to be used by
 * each port is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * sent.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Number of extended statistics to show */
/*
 * In a container, a process running with the 'stats-period' option cannot
 * be terminated from outside. Set a flag to exit the stats-period loop
 * once SIGINT/SIGTERM is received.
 */

/*
 * Configuration of packet segments used to scatter received packets
 * if any of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means non-DCB mode. */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1;  /**< Number of RX queues per port. */
queueid_t nb_txq = 1;  /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of buffered packets before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour.
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint16_t hairpin_mode;
/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};
/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
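/*
 * The mask is a per-event-type bitmap: an event handler is expected to test
 * (event_print_mask & (UINT32_C(1) << type)) before printing. Note that
 * RTE_ETH_EVENT_VF_MBOX is the only type left out of the default mask, so
 * VF mailbox events stay silent unless explicitly enabled.
 */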
/*
 * Decide whether all memory is locked for performance.
 */

/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIB_LATENCYSTATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;
#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	/* Default maximum frame length.
	 * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead"
	 */
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */
/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

/*
 * Measure of CPU cycles disabled by default.
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default.
 */
uint8_t record_burst_stats;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/*
 * Hexadecimal bitmask of RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed.
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */
int proc_id;

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;
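/*
 * Multi-process model: one primary process owns device-global operations
 * (configure/start/stop/MTU/mempool teardown, see the *_mp() wrappers
 * below), while secondary processes only poll their share of the queues,
 * selected via proc_id/num_procs.
 */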
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}
static void
flow_pick_transfer_proxy_mp(uint16_t port_id)
{
	struct rte_port *port = &ports[port_id];
	int ret;

	port->flow_transfer_proxy = port_id;

	if (!is_proc_primary())
		return;

	ret = rte_flow_pick_transfer_proxy(port_id, &port->flow_transfer_proxy,
					   NULL);
	if (ret != 0) {
		fprintf(stderr, "Error picking flow transfer proxy for port %u: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
}

static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		     const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					     dev_conf);
	return 0;
}

static int
eth_dev_start_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_start(port_id);

	return 0;
}

static int
eth_dev_stop_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_stop(port_id);

	return 0;
}

static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}
/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
/*
 * Helper function to check if a socket is new (not yet discovered).
 * Returns a positive value if the socket is new, zero otherwise.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}
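/*
 * Worked example (illustrative numbers): with 2MB pages and a total
 * per-mbuf object size around 2.5KB, one page holds roughly 819 mbufs,
 * so 256K mbufs need 321 pages (~642MB) for the objects plus the fixed
 * 128MB header allowance, everything rounded up to the page size.
 */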
static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
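/*
 * For example, 2MB pages give log2(2M) = 21, so the returned value is
 * 21 << 26, which matches the kernel's MAP_HUGE_2MB encoding; 1GB pages
 * yield 30 << 26 (MAP_HUGE_1GB).
 */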
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}
struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			      RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);
		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret != 0)
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret != 0) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
}
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
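	/*
	 * Sizing example (illustrative): with mbuf_sz = 2176 (already a
	 * multiple of the 64-byte cache line), elt_num = 2M / 2176 = 963
	 * buffers per zone, so 256K mbufs require ceil(262144 / 963) = 273
	 * zones of 2MB each.
	 */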
	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 socket_id,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_1GB |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
						 EXTBUF_ZONE_SIZE);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
#ifndef RTE_EXEC_ENV_WINDOWS
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
	if (!is_proc_primary()) {
		rte_mp = rte_mempool_lookup(pool_name);
		if (rte_mp == NULL)
			rte_exit(EXIT_FAILURE,
				 "Get mbuf pool for socket %u failed: %s\n",
				 socket_id, rte_strerror(rte_errno));
		return rte_mp;
	}

	TESTPMD_LOG(INFO,
		    "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		    pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
	{
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
			    rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		break;
	}
#ifndef RTE_EXEC_ENV_WINDOWS
	case MP_ALLOC_ANON:
	{
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned int) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, mempool_flags);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
		break;
	}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
	{
		int heap_socket;
		bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

		if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
			rte_exit(EXIT_FAILURE, "Could not create external memory\n");

		heap_socket =
			rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
		if (heap_socket < 0)
			rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
			    rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size,
				heap_socket);
		break;
	}
#endif
	case MP_ALLOC_XBUF:
	{
		struct rte_pktmbuf_extmem *ext_mem;
		unsigned int ext_num;

		ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
				       socket_id, pool_name, &ext_mem);
		if (ext_num == 0)
			rte_exit(EXIT_FAILURE,
				 "Can't create pinned data buffers\n");

		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
			    rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create_extbuf
				(pool_name, nb_mbuf, mb_mempool_cache,
				 0, mbuf_seg_size, socket_id,
				 ext_mem, ext_num);
		free(ext_mem);
		break;
	}
	default:
	{
		rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
	}
	}

#ifndef RTE_EXEC_ENV_WINDOWS
err:
#endif
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			 "Creation of mbuf pool for socket %u failed: %s\n",
			 socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
/*
 * Check whether the given socket id is valid in NUMA mode;
 * if valid, return 0, else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			fprintf(stderr,
				"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return max_rxq_valid ? allowed_max_rxq : 0;
}

/*
 * Check whether the input rxq is valid.
 * If the input rxq is not greater than the maximum number
 * of RX queues of any port, it is valid.
 * If valid, return 0, else return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		fprintf(stderr,
			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
			rxq, allowed_max_rxq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return max_txq_valid ? allowed_max_txq : 0;
}

/*
 * Check whether the input txq is valid.
 * If the input txq is not greater than the maximum number
 * of TX queues of any port, it is valid.
 * If valid, return 0, else return -1.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		fprintf(stderr,
			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
			txq, allowed_max_txq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RXDs of every rx queue.
 * *pid returns the port id which has the minimal value of
 * max_rxd across all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_rxd(portid_t *pid)
{
	uint16_t allowed_max_rxd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_rxd;
}

/*
 * Get the allowed minimal number of RXDs of every rx queue.
 * *pid returns the port id which has the maximal value of
 * min_rxd across all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_rxd(portid_t *pid)
{
	uint16_t allowed_min_rxd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_rxd;
}

/*
 * Check whether the input rxd is valid.
 * If the input rxd is not greater than the maximum number of RXDs of
 * every Rx queue and not less than the minimal number of RXDs of
 * every Rx queue, it is valid.
 * If valid, return 0, else return -1.
 */
int
check_nb_rxd(queueid_t rxd)
{
	uint16_t allowed_max_rxd;
	uint16_t allowed_min_rxd;
	portid_t pid = 0;

	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
	if (rxd > allowed_max_rxd) {
		fprintf(stderr,
			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
			rxd, allowed_max_rxd, pid);
		return -1;
	}

	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
	if (rxd < allowed_min_rxd) {
		fprintf(stderr,
			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
			rxd, allowed_min_rxd, pid);
		return -1;
	}

	return 0;
}
/*
 * Get the allowed maximum number of TXDs of every tx queue.
 * *pid returns the port id which has the minimal value of
 * max_txd across all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_txd(portid_t *pid)
{
	uint16_t allowed_max_txd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_txd;
}

/*
 * Get the allowed minimal number of TXDs of every tx queue.
 * *pid returns the port id which has the maximal value of
 * min_txd across all queues of all ports.
 */
static uint16_t
get_allowed_min_nb_txd(portid_t *pid)
{
	uint16_t allowed_min_txd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_txd;
}

/*
 * Check whether the input txd is valid.
 * If the input txd is not greater than the maximum number
 * of TXDs of every Tx queue, it is valid.
 * If valid, return 0, else return -1.
 */
int
check_nb_txd(queueid_t txd)
{
	uint16_t allowed_max_txd;
	uint16_t allowed_min_txd;
	portid_t pid = 0;

	allowed_max_txd = get_allowed_max_nb_txd(&pid);
	if (txd > allowed_max_txd) {
		fprintf(stderr,
			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
			txd, allowed_max_txd, pid);
		return -1;
	}

	allowed_min_txd = get_allowed_min_nb_txd(&pid);
	if (txd < allowed_min_txd) {
		fprintf(stderr,
			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
			txd, allowed_min_txd, pid);
		return -1;
	}

	return 0;
}
/*
 * Get the allowed maximum number of hairpin queues.
 * *pid returns the port id which has the minimal value of
 * max_hairpin_queues among all ports.
 */
queueid_t
get_allowed_max_nb_hairpinq(portid_t *pid)
{
	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
	portid_t pi;
	struct rte_eth_hairpin_cap cap;

	RTE_ETH_FOREACH_DEV(pi) {
		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
			*pid = pi;
			return 0;
		}
		if (cap.max_nb_queues < allowed_max_hairpinq) {
			allowed_max_hairpinq = cap.max_nb_queues;
			*pid = pi;
		}
	}
	return allowed_max_hairpinq;
}

/*
 * Check whether the input number of hairpin queues is valid.
 * If it is not greater than the maximum number of hairpin queues
 * of any port, it is valid.
 * If valid, return 0, else return -1.
 */
int
check_nb_hairpinq(queueid_t hairpinq)
{
	queueid_t allowed_max_hairpinq;
	portid_t pid = 0;

	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
	if (hairpinq > allowed_max_hairpinq) {
		fprintf(stderr,
			"Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
			hairpinq, allowed_max_hairpinq, pid);
		return -1;
	}
	return 0;
}
static void
init_config_port_offloads(portid_t pid, uint32_t socket_id)
{
	struct rte_port *port = &ports[pid];
	int ret;
	int i;

	eth_rx_metadata_negotiate_mp(pid);
	flow_pick_transfer_proxy_mp(pid);

	port->dev_conf.txmode = tx_mode;
	port->dev_conf.rxmode = rx_mode;

	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");

	ret = update_jumbo_frame_offload(pid);
	if (ret != 0)
		fprintf(stderr,
			"Updating jumbo frame offload failed for port %u\n",
			pid);

	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
		port->dev_conf.txmode.offloads &=
			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Apply Rx offloads configuration */
	for (i = 0; i < port->dev_info.max_rx_queues; i++)
		port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
	/* Apply Tx offloads configuration */
	for (i = 0; i < port->dev_info.max_tx_queues; i++)
		port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;

	if (eth_link_speed)
		port->dev_conf.link_speeds = eth_link_speed;

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
	port->tx_metadata = 0;

	/*
	 * Check for maximum number of segments per MTU.
	 * Accordingly update the mbuf data size.
	 */
	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
		uint16_t data_size = rx_mode.max_rx_pkt_len /
			port->dev_info.rx_desc_lim.nb_mtu_seg_max;

		if ((data_size + RTE_PKTMBUF_HEADROOM) > mbuf_data_size[0]) {
			mbuf_data_size[0] = data_size + RTE_PKTMBUF_HEADROOM;
			TESTPMD_LOG(WARNING,
				    "Configured mbuf size of the first segment %hu\n",
				    mbuf_data_size[0]);
		}
	}
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		uint32_t socket_id;

		if (numa_support) {
			socket_id = port_numa[pid];
			if (port_numa[pid] == NUMA_NO_CONFIG) {
				socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
			}
		} else {
			socket_id = (socket_num == UMA_NO_CONFIG) ?
				    0 : socket_num;
		}
		/* Apply default TxRx configuration for all ports */
		init_config_port_offloads(pid, socket_id);
	}
	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
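	/*
	 * Illustrative sizing, assuming the usual testpmd defaults for these
	 * macros: with RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512, mb_mempool_cache = 250 and 4 lcores, this
	 * yields 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, then scaled by
	 * RTE_MAX_ETHPORTS so every port's rings can be populated.
	 */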
	if (numa_support) {
		uint8_t i, j;

		for (i = 0; i < num_sockets; i++)
			for (j = 0; j < mbuf_data_size_n; j++)
				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
					mbuf_pool_create(mbuf_data_size[j],
							 nb_mbuf_per_pool,
							 socket_ids[i], j);
	} else {
		uint8_t i;

		for (i = 0; i < mbuf_data_size_n; i++)
			mempools[i] = mbuf_pool_create
					(mbuf_data_size[i],
					 nb_mbuf_per_pool,
					 socket_num == UMA_NO_CONFIG ?
					 0 : socket_num, i);
	}

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);

		if (mbp == NULL)
			mbp = mbuf_pool_find(0, 0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
				 "rte_gro_ctx_create() failed\n");
		}
	}
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	/* Reconfiguration of Ethernet ports. */
	init_config_port_offloads(new_port_id, socket_id);
	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			fprintf(stderr,
				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
				nb_rxq, port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			fprintf(stderr,
				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
				nb_txq, port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		fprintf(stderr,
			"Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	uint64_t total_burst, sburst;
	uint64_t nb_burst;
	uint64_t burst_stats[4];
	uint16_t pktnb_stats[4];
	uint16_t nb_pkt;
	int burst_percent[4], sburstp;
	int i;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];
	pktnb_stats[0] = 0;

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;

		total_burst += nb_burst;

		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		if (i == 3) {
			printf("%d%% of other]\n", 100 - sburstp);
			return;
		}

		sburst += burst_stats[i];
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
			       100 - sburstp, (int) pktnb_stats[i]);
			return;
		}

		burst_percent[i] =
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
		       burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
	}
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
	       " TX-dropped: %-14"PRIu64,
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
		       "  Rx- bad L4 checksum: %-14"PRIu64
		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);
		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
		       fs->rx_bad_outer_ip_csum);
	} else {
		printf("\n");
	}

	if (record_burst_stats) {
		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
	}
}
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
		uint64_t rx_bad_outer_ip_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
	uint64_t fwd_cycles = 0;
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
				fs->rx_bad_outer_ip_csum;

		if (record_core_cycles)
			fwd_cycles += fs->core_cycles;
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
		       stats.ipackets + stats.imissed);

		if (cur_fwd_eng == &csum_fwd_engine) {
			printf("  Bad-ipcsum: %-14"PRIu64
			       " Bad-l4csum: %-14"PRIu64
			       "Bad-outer-l4csum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_ip_csum,
			       ports_stats[pt_id].rx_bad_l4_csum,
			       ports_stats[pt_id].rx_bad_outer_l4_csum);
			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_outer_ip_csum);
		}
		if (stats.ierrors + stats.rx_nombuf > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats.opackets, ports_stats[pt_id].tx_dropped,
		       stats.opackets + ports_stats[pt_id].tx_dropped);

		if (record_burst_stats) {
			if (ports_stats[pt_id].rx_stream)
				pkt_burst_stats_display("RX",
					&ports_stats[pt_id].rx_stream->rx_burst_stats);
			if (ports_stats[pt_id].tx_stream)
				pkt_burst_stats_display("TX",
					&ports_stats[pt_id].tx_stream->tx_burst_stats);
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	if (record_core_cycles) {
#define CYC_PER_MHZ 1E6
		if (total_recv > 0 || total_xmit > 0) {
			uint64_t total_pkts = 0;
			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
				total_pkts = total_xmit;
			else
				total_pkts = total_recv;

			printf("\n  CPU cycles/packet=%.2F (total cycles="
			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
			       " MHz Clock\n",
			       (double) fwd_cycles / total_pkts,
			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
		}
	}
}
void
fwd_stats_reset(void)
{
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		fs->rx_packets = 0;
		fs->tx_packets = 0;
		fs->fwd_dropped = 0;
		fs->rx_bad_ip_csum = 0;
		fs->rx_bad_l4_csum = 0;
		fs->rx_bad_outer_l4_csum = 0;
		fs->rx_bad_outer_ip_csum = 0;

		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
		fs->core_cycles = 0;
	}
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	if (num_procs > 1) {
		printf("multi-process does not support flushing of fwd Rx queues, skipping\n");
		return;
	}

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];

				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() keeps returning
				 * packets, so a timer is added to exit the
				 * loop after a 1 second timeout.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIB_BITRATESTATS
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIB_BITRATESTATS
		if (bitrate_enabled != 0 &&
		    bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0 &&
		    latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
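/*
 * Note on the copy above: run_pkt_fwd_on_lcore() loops until fc->stopped
 * becomes true, so running it on a local copy whose stopped flag is pre-set
 * to 1 makes the engine perform exactly one pass over its streams - a
 * single TXONLY burst.
 */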
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	unsigned int i;
	unsigned int lc_id;
	int diag;

	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				fprintf(stderr,
					"launch lcore %u failed - diag=%d\n",
					lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	unsigned int i;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq are 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		fprintf(stderr, "Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		fprintf(stderr, "Packet forwarding already started\n");
		return;
	}

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			if (port_fwd_begin(fwd_ports_ids[i])) {
				fprintf(stderr,
					"Packet forwarding is not ready\n");
				return;
			}
		}
	}

	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
				if (port_fwd_begin(fwd_ports_ids[i])) {
					fprintf(stderr,
						"Packet forwarding is not ready\n");
					return;
				}
			}
		}
	}

	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	fwd_stats_reset();
	if (with_tx_first) {
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	port_fwd_end_t port_fwd_end;
	lcoreid_t lc_id;
	portid_t pt_id;
	int i;

	if (test_done) {
		fprintf(stderr, "Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}

	fwd_stats_display();

	test_done = 1;
	printf("\nDone.\n");
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		fprintf(stderr, "\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		fprintf(stderr, "\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* All ports are started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}
/* Configure the Rx and Tx hairpin queues for the selected port. */
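/*
 * Meaning of hairpin_mode bits, as inferred from the branches below:
 * with the low nibble clear each port hairpins to itself (binding is
 * implicit); 0x1 chains the ports into a ring (Tx peer is the next port,
 * Rx peer the previous one); 0x2 pairs ports two by two; and bit 0x10
 * requests explicit Tx flow rules (tx_explicit) instead of implicit
 * forwarding.
 */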
2440 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2443 struct rte_eth_hairpin_conf hairpin_conf = {
2448 struct rte_port *port = &ports[pi];
2449 uint16_t peer_rx_port = pi;
2450 uint16_t peer_tx_port = pi;
2451 uint32_t manual = 1;
2452 uint32_t tx_exp = hairpin_mode & 0x10;
2454 if (!(hairpin_mode & 0xf)) {
2458 } else if (hairpin_mode & 0x1) {
2459 peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2460 RTE_ETH_DEV_NO_OWNER);
2461 if (peer_tx_port >= RTE_MAX_ETHPORTS)
2462 peer_tx_port = rte_eth_find_next_owned_by(0,
2463 RTE_ETH_DEV_NO_OWNER);
2464 if (p_pi != RTE_MAX_ETHPORTS) {
2465 peer_rx_port = p_pi;
2469 /* Last port will be the peer RX port of the first. */
2470 RTE_ETH_FOREACH_DEV(next_pi)
2471 peer_rx_port = next_pi;
2474 } else if (hairpin_mode & 0x2) {
2476 peer_rx_port = p_pi;
2478 peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2479 RTE_ETH_DEV_NO_OWNER);
2480 if (peer_rx_port >= RTE_MAX_ETHPORTS)
2483 peer_tx_port = peer_rx_port;
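	/*
	 * Illustrative summary (bit meanings inferred from this function):
	 * a low nibble of 0 pairs each port with itself, 0x1 chains the
	 * ports in a loop (the Tx peer is the next port, the last wraps to
	 * the first), 0x2 pairs adjacent ports, and 0x10 requests explicit
	 * Tx flow mode.
	 */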
2487 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2488 hairpin_conf.peers[0].port = peer_rx_port;
2489 hairpin_conf.peers[0].queue = i + nb_rxq;
2490 hairpin_conf.manual_bind = !!manual;
2491 hairpin_conf.tx_explicit = !!tx_exp;
2492 diag = rte_eth_tx_hairpin_queue_setup
2493 (pi, qi, nb_txd, &hairpin_conf);
2498 /* Failed to set up Tx hairpin queue, return */
2499 if (rte_atomic16_cmpset(&(port->port_status),
2501 RTE_PORT_STOPPED) == 0)
2503 "Port %d cannot be set back to stopped\n", pi);
2504 fprintf(stderr, "Failed to configure port %d hairpin queues\n",
2506 /* try to reconfigure queues next time */
2507 port->need_reconfig_queues = 1;
2510 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2511 hairpin_conf.peers[0].port = peer_tx_port;
2512 hairpin_conf.peers[0].queue = i + nb_txq;
2513 hairpin_conf.manual_bind = !!manual;
2514 hairpin_conf.tx_explicit = !!tx_exp;
2515 diag = rte_eth_rx_hairpin_queue_setup
2516 (pi, qi, nb_rxd, &hairpin_conf);
2521 /* Failed to set up Rx hairpin queue, return */
2522 if (rte_atomic16_cmpset(&(port->port_status),
2524 RTE_PORT_STOPPED) == 0)
2526 "Port %d cannot be set back to stopped\n", pi);
2527 fprintf(stderr, "Failed to configure port %d hairpin queues\n",
2529 /* try to reconfigure queues next time */
2530 port->need_reconfig_queues = 1;
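/*
 * Worked example (illustrative): with nb_rxq = nb_txq = 2 and
 * nb_hairpinq = 2, the loops above create Tx hairpin queues 2..3 peering
 * with Rx queues 2..3 on peer_rx_port, and Rx hairpin queues 2..3
 * peering with Tx queues 2..3 on peer_tx_port.
 */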
2536 /* Configure the Rx with optional split. */
2538 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2539 uint16_t nb_rx_desc, unsigned int socket_id,
2540 struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2542 union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2543 unsigned int i, mp_n;
2546 if (rx_pkt_nb_segs <= 1 ||
2547 (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2548 rx_conf->rx_seg = NULL;
2549 rx_conf->rx_nseg = 0;
2550 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2551 nb_rx_desc, socket_id,
2555 for (i = 0; i < rx_pkt_nb_segs; i++) {
2556 struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2557 struct rte_mempool *mpx;
2559 * Use the last valid pool for the segments whose index
2560 * exceeds the number of configured pools.
2562 mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2563 mpx = mbuf_pool_find(socket_id, mp_n);
2564 /* Handle zero as mbuf data buffer size. */
2565 rx_seg->length = rx_pkt_seg_lengths[i] ?
2566 rx_pkt_seg_lengths[i] :
2567 mbuf_data_size[mp_n];
2568 rx_seg->offset = i < rx_pkt_nb_offs ?
2569 rx_pkt_seg_offsets[i] : 0;
2570 rx_seg->mp = mpx ? mpx : mp;
2572 rx_conf->rx_nseg = rx_pkt_nb_segs;
2573 rx_conf->rx_seg = rx_useg;
2574 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2575 socket_id, rx_conf, NULL);
2576 rx_conf->rx_seg = NULL;
2577 rx_conf->rx_nseg = 0;
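/*
 * A minimal sketch (not part of testpmd) of the same buffer-split setup
 * that rx_queue_setup() performs above, for a fixed two-segment layout.
 * The helper name and the mempools hdr_mp/pay_mp are assumptions supplied
 * by the caller; the PMD must support RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT.
 */
static int
example_split_rx_queue(uint16_t port_id, uint16_t queue_id,
		       uint16_t nb_desc, unsigned int socket_id,
		       struct rte_eth_rxconf *rxconf,
		       struct rte_mempool *hdr_mp, struct rte_mempool *pay_mp)
{
	union rte_eth_rxseg segs[2] = {};
	int ret;

	segs[0].split.mp = hdr_mp;	/* first 128 bytes of each packet */
	segs[0].split.length = 128;
	segs[0].split.offset = 0;
	segs[1].split.mp = pay_mp;	/* remainder of the packet */
	segs[1].split.length = rte_pktmbuf_data_room_size(pay_mp) -
			       RTE_PKTMBUF_HEADROOM;
	segs[1].split.offset = 0;
	rxconf->offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	rxconf->rx_seg = segs;
	rxconf->rx_nseg = 2;
	ret = rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket_id,
				     rxconf, NULL);
	rxconf->rx_seg = NULL;		/* rx_seg pointed at stack memory */
	rxconf->rx_nseg = 0;
	return ret;
}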
2582 alloc_xstats_display_info(portid_t pi)
2584 uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
2585 uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
2586 uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
2588 if (xstats_display_num == 0)
2591 *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
2592 if (*ids_supp == NULL)
2595 *prev_values = calloc(xstats_display_num,
2596 sizeof(**prev_values));
2597 if (*prev_values == NULL)
2598 goto fail_prev_values;
2600 *curr_values = calloc(xstats_display_num,
2601 sizeof(**curr_values));
2602 if (*curr_values == NULL)
2603 goto fail_curr_values;
2605 ports[pi].xstats_info.allocated = true;
2618 free_xstats_display_info(portid_t pi)
2620 if (!ports[pi].xstats_info.allocated)
2622 free(ports[pi].xstats_info.ids_supp);
2623 free(ports[pi].xstats_info.prev_values);
2624 free(ports[pi].xstats_info.curr_values);
2625 ports[pi].xstats_info.allocated = false;
2628 /** Fill helper structures for specified port to show extended statistics. */
2630 fill_xstats_display_info_for_port(portid_t pi)
2632 unsigned int stat, stat_supp;
2633 const char *xstat_name;
2634 struct rte_port *port;
2638 if (xstats_display_num == 0)
2641 if (pi == (portid_t)RTE_PORT_ALL) {
2642 fill_xstats_display_info();
2647 if (port->port_status != RTE_PORT_STARTED)
2650 if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
2651 rte_exit(EXIT_FAILURE,
2652 "Failed to allocate xstats display memory\n");
2654 ids_supp = port->xstats_info.ids_supp;
2655 for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
2656 xstat_name = xstats_display[stat].name;
2657 rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
2658 ids_supp + stat_supp);
2660 fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n",
2661 xstat_name, pi, stat);
2667 port->xstats_info.ids_supp_sz = stat_supp;
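/*
 * A minimal sketch (not part of testpmd) of how the ids_supp table built
 * above can be consumed: rte_eth_xstats_get_by_id() fetches only the
 * selected counters into curr_values, in the same order as the ids. The
 * helper name is hypothetical.
 */
static int
example_fetch_selected_xstats(portid_t pi)
{
	struct rte_port *port = &ports[pi];
	int rc;

	if (!port->xstats_info.allocated)
		return -1;	/* nothing was selected for display */
	rc = rte_eth_xstats_get_by_id(pi, port->xstats_info.ids_supp,
				      port->xstats_info.curr_values,
				      port->xstats_info.ids_supp_sz);
	return rc < 0 ? rc : 0;
}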
2670 /** Fill helper structures for all ports to show extended statistics. */
2672 fill_xstats_display_info(void)
2676 if (xstats_display_num == 0)
2679 RTE_ETH_FOREACH_DEV(pi)
2680 fill_xstats_display_info_for_port(pi);
2684 start_port(portid_t pid)
2686 int diag, need_check_link_status = -1;
2688 portid_t p_pi = RTE_MAX_ETHPORTS;
2689 portid_t pl[RTE_MAX_ETHPORTS];
2690 portid_t peer_pl[RTE_MAX_ETHPORTS];
2691 uint16_t cnt_pi = 0;
2692 uint16_t cfg_pi = 0;
2695 struct rte_port *port;
2696 struct rte_eth_hairpin_cap cap;
2698 if (port_id_is_invalid(pid, ENABLED_WARN))
2701 RTE_ETH_FOREACH_DEV(pi) {
2702 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2705 need_check_link_status = 0;
2707 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2708 RTE_PORT_HANDLING) == 0) {
2709 fprintf(stderr, "Port %d is now not stopped\n", pi);
2713 if (port->need_reconfig > 0) {
2714 port->need_reconfig = 0;
2716 if (flow_isolate_all) {
2717 int ret = port_flow_isolate(pi, 1);
2720 "Failed to apply isolated mode on port %d\n",
2725 configure_rxtx_dump_callbacks(0);
2726 printf("Configuring Port %d (socket %u)\n", pi,
2728 if (nb_hairpinq > 0 &&
2729 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2731 "Port %d doesn't support hairpin queues\n",
2735 /* configure port */
2736 diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
2737 nb_txq + nb_hairpinq,
2740 if (rte_atomic16_cmpset(&(port->port_status),
2741 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2743 "Port %d can not be set back to stopped\n",
2745 fprintf(stderr, "Fail to configure port %d\n",
2747 /* try to reconfigure port next time */
2748 port->need_reconfig = 1;
2752 if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2753 port->need_reconfig_queues = 0;
2754 /* setup tx queues */
2755 for (qi = 0; qi < nb_txq; qi++) {
2756 if ((numa_support) &&
2757 (txring_numa[pi] != NUMA_NO_CONFIG))
2758 diag = rte_eth_tx_queue_setup(pi, qi,
2759 port->nb_tx_desc[qi],
2761 &(port->tx_conf[qi]));
2763 diag = rte_eth_tx_queue_setup(pi, qi,
2764 port->nb_tx_desc[qi],
2766 &(port->tx_conf[qi]));
2771 /* Failed to set up Tx queue, return */
2772 if (rte_atomic16_cmpset(&(port->port_status),
2774 RTE_PORT_STOPPED) == 0)
2776 "Port %d cannot be set back to stopped\n",
2779 "Failed to configure port %d Tx queues\n",
2781 /* try to reconfigure queues next time */
2782 port->need_reconfig_queues = 1;
2785 for (qi = 0; qi < nb_rxq; qi++) {
2786 /* setup rx queues */
2787 if ((numa_support) &&
2788 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2789 struct rte_mempool * mp =
2791 (rxring_numa[pi], 0);
2794 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2799 diag = rx_queue_setup(pi, qi,
2800 port->nb_rx_desc[qi],
2802 &(port->rx_conf[qi]),
2805 struct rte_mempool *mp =
2807 (port->socket_id, 0);
2810 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2814 diag = rx_queue_setup(pi, qi,
2815 port->nb_rx_desc[qi],
2817 &(port->rx_conf[qi]),
2823 /* Failed to set up Rx queue, return */
2824 if (rte_atomic16_cmpset(&(port->port_status),
2826 RTE_PORT_STOPPED) == 0)
2828 "Port %d cannot be set back to stopped\n",
2831 "Failed to configure port %d Rx queues\n",
2833 /* try to reconfigure queues next time */
2834 port->need_reconfig_queues = 1;
2837 /* setup hairpin queues */
2838 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
2841 configure_rxtx_dump_callbacks(verbose_level);
2843 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2847 "Port %d: Failed to disable Ptype parsing\n",
2855 diag = eth_dev_start_mp(pi);
2857 fprintf(stderr, "Fail to start port %d: %s\n",
2858 pi, rte_strerror(-diag));
2860 /* Failed to start the port, return */
2861 if (rte_atomic16_cmpset(&(port->port_status),
2862 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2864 "Port %d cannot be set back to stopped\n",
2869 if (rte_atomic16_cmpset(&(port->port_status),
2870 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2871 fprintf(stderr, "Port %d can not be set into started\n",
2874 if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
2875 printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
2876 RTE_ETHER_ADDR_BYTES(&port->eth_addr));
2878 /* At least one port was started: the link status must be checked. */
2879 need_check_link_status = 1;
2884 if (need_check_link_status == 1 && !no_link_check)
2885 check_all_ports_link_status(RTE_PORT_ALL);
2886 else if (need_check_link_status == 0)
2887 fprintf(stderr, "Please stop the ports first\n");
2889 if (hairpin_mode & 0xf) {
2893 /* bind all started hairpin ports */
2894 for (i = 0; i < cfg_pi; i++) {
2896 /* bind current Tx to all peer Rx */
2897 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2898 RTE_MAX_ETHPORTS, 1);
2901 for (j = 0; j < peer_pi; j++) {
2902 if (!port_is_started(peer_pl[j]))
2904 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2907 "Error during binding hairpin Tx port %u to %u: %s\n",
2909 rte_strerror(-diag));
2913 /* bind all peer Tx to current Rx */
2914 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2915 RTE_MAX_ETHPORTS, 0);
2918 for (j = 0; j < peer_pi; j++) {
2919 if (!port_is_started(peer_pl[j]))
2921 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
2924 "Error during binding hairpin Tx port %u to %u: %s\n",
2926 rte_strerror(-diag));
2933 fill_xstats_display_info_for_port(pid);
2940 stop_port(portid_t pid)
2943 struct rte_port *port;
2944 int need_check_link_status = 0;
2945 portid_t peer_pl[RTE_MAX_ETHPORTS];
2948 if (port_id_is_invalid(pid, ENABLED_WARN))
2951 printf("Stopping ports...\n");
2953 RTE_ETH_FOREACH_DEV(pi) {
2954 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2957 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2959 "Please remove port %d from forwarding configuration.\n",
2964 if (port_is_bonding_slave(pi)) {
2966 "Please remove port %d from bonded device.\n",
2972 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2973 RTE_PORT_HANDLING) == 0)
2976 if (hairpin_mode & 0xf) {
2979 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
2980 /* unbind all peer Tx from current Rx */
2981 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2982 RTE_MAX_ETHPORTS, 0);
2985 for (j = 0; j < peer_pi; j++) {
2986 if (!port_is_started(peer_pl[j]))
2988 rte_eth_hairpin_unbind(peer_pl[j], pi);
2992 if (port->flow_list)
2993 port_flow_flush(pi);
2995 if (eth_dev_stop_mp(pi) != 0)
2996 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
2999 if (rte_atomic16_cmpset(&(port->port_status),
3000 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
3001 fprintf(stderr, "Port %d can not be set into stopped\n",
3003 need_check_link_status = 1;
3005 if (need_check_link_status && !no_link_check)
3006 check_all_ports_link_status(RTE_PORT_ALL);
3012 remove_invalid_ports_in(portid_t *array, portid_t *total)
3015 portid_t new_total = 0;
3017 for (i = 0; i < *total; i++)
3018 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
3019 array[new_total] = array[i];
3026 remove_invalid_ports(void)
3028 remove_invalid_ports_in(ports_ids, &nb_ports);
3029 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
3030 nb_cfg_ports = nb_fwd_ports;
3034 close_port(portid_t pid)
3037 struct rte_port *port;
3039 if (port_id_is_invalid(pid, ENABLED_WARN))
3042 printf("Closing ports...\n");
3044 RTE_ETH_FOREACH_DEV(pi) {
3045 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3048 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3050 "Please remove port %d from forwarding configuration.\n",
3055 if (port_is_bonding_slave(pi)) {
3057 "Please remove port %d from bonded device.\n",
3063 if (rte_atomic16_cmpset(&(port->port_status),
3064 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
3065 fprintf(stderr, "Port %d is already closed\n", pi);
3069 if (is_proc_primary()) {
3070 port_flow_flush(pi);
3071 rte_eth_dev_close(pi);
3074 free_xstats_display_info(pi);
3077 remove_invalid_ports();
3082 reset_port(portid_t pid)
3086 struct rte_port *port;
3088 if (port_id_is_invalid(pid, ENABLED_WARN))
3091 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
3092 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
3094 "Can not reset port(s), please stop port(s) first.\n");
3098 printf("Resetting ports...\n");
3100 RTE_ETH_FOREACH_DEV(pi) {
3101 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3104 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3106 "Please remove port %d from forwarding configuration.\n",
3111 if (port_is_bonding_slave(pi)) {
3113 "Please remove port %d from bonded device.\n",
3118 diag = rte_eth_dev_reset(pi);
3121 port->need_reconfig = 1;
3122 port->need_reconfig_queues = 1;
3124 fprintf(stderr, "Failed to reset port %d. diag=%d\n",
3133 attach_port(char *identifier)
3136 struct rte_dev_iterator iterator;
3138 printf("Attaching a new port...\n");
3140 if (identifier == NULL) {
3141 fprintf(stderr, "Invalid parameters are specified\n");
3145 if (rte_dev_probe(identifier) < 0) {
3146 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3150 /* first attach mode: event */
3151 if (setup_on_probe_event) {
3152 /* new ports are detected on RTE_ETH_EVENT_NEW event */
3153 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
3154 if (ports[pi].port_status == RTE_PORT_HANDLING &&
3155 ports[pi].need_setup != 0)
3156 setup_attached_port(pi);
3160 /* second attach mode: iterator */
3161 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
3162 /* setup ports matching the devargs used for probing */
3163 if (port_is_forwarding(pi))
3164 continue; /* port was already attached before */
3165 setup_attached_port(pi);
3170 setup_attached_port(portid_t pi)
3172 unsigned int socket_id;
3175 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
3176 /* if socket_id is invalid, set to the first available socket. */
3177 if (check_socket_id(socket_id) < 0)
3178 socket_id = socket_ids[0];
3179 reconfig(pi, socket_id);
3180 ret = rte_eth_promiscuous_enable(pi);
3183 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
3184 pi, rte_strerror(-ret));
3186 ports_ids[nb_ports++] = pi;
3187 fwd_ports_ids[nb_fwd_ports++] = pi;
3188 nb_cfg_ports = nb_fwd_ports;
3189 ports[pi].need_setup = 0;
3190 ports[pi].port_status = RTE_PORT_STOPPED;
3192 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
3197 detach_device(struct rte_device *dev)
3202 fprintf(stderr, "Device already removed\n");
3206 printf("Removing a device...\n");
3208 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3209 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3210 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3211 fprintf(stderr, "Port %u not stopped\n",
3215 port_flow_flush(sibling);
3219 if (rte_dev_remove(dev) < 0) {
3220 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3223 remove_invalid_ports();
3225 printf("Device is detached\n");
3226 printf("Now total ports is %d\n", nb_ports);
3232 detach_port_device(portid_t port_id)
3235 struct rte_eth_dev_info dev_info;
3237 if (port_id_is_invalid(port_id, ENABLED_WARN))
3240 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3241 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3242 fprintf(stderr, "Port not stopped\n");
3245 fprintf(stderr, "Port was not closed\n");
3248 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3251 "Failed to get device info for port %d, not detaching\n",
3255 detach_device(dev_info.device);
3259 detach_devargs(char *identifier)
3261 struct rte_dev_iterator iterator;
3262 struct rte_devargs da;
3265 printf("Removing a device...\n");
3267 memset(&da, 0, sizeof(da));
3268 if (rte_devargs_parsef(&da, "%s", identifier)) {
3269 fprintf(stderr, "cannot parse identifier\n");
3273 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
3274 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3275 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3276 fprintf(stderr, "Port %u not stopped\n",
3278 rte_eth_iterator_cleanup(&iterator);
3279 rte_devargs_reset(&da);
3282 port_flow_flush(port_id);
3286 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3287 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3288 da.name, da.bus->name);
3289 rte_devargs_reset(&da);
3293 remove_invalid_ports();
3295 printf("Device %s is detached\n", identifier);
3296 printf("Now total ports is %d\n", nb_ports);
3298 rte_devargs_reset(&da);
3309 stop_packet_forwarding();
3311 #ifndef RTE_EXEC_ENV_WINDOWS
3312 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3314 if (mp_alloc_type == MP_ALLOC_ANON)
3315 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3320 if (ports != NULL) {
3322 RTE_ETH_FOREACH_DEV(pt_id) {
3323 printf("\nStopping port %d...\n", pt_id);
3327 RTE_ETH_FOREACH_DEV(pt_id) {
3328 printf("\nShutting down port %d...\n", pt_id);
3335 ret = rte_dev_event_monitor_stop();
3338 "fail to stop device event monitor.");
3342 ret = rte_dev_event_callback_unregister(NULL,
3343 dev_event_callback, NULL);
3346 "fail to unregister device event callback.\n");
3350 ret = rte_dev_hotplug_handle_disable();
3353 "fail to disable hotplug handling.\n");
3357 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3359 mempool_free_mp(mempools[i]);
3361 free(xstats_display);
3363 printf("\nBye...\n");
3366 typedef void (*cmd_func_t)(void);
3367 struct pmd_test_command {
3368 const char *cmd_name;
3369 cmd_func_t cmd_func;
3372 /* Check the link status of all ports for up to 9 s and print a final report */
3374 check_all_ports_link_status(uint32_t port_mask)
3376 #define CHECK_INTERVAL 100 /* 100ms */
3377 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3379 uint8_t count, all_ports_up, print_flag = 0;
3380 struct rte_eth_link link;
3382 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3384 printf("Checking link statuses...\n");
3386 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3388 RTE_ETH_FOREACH_DEV(portid) {
3389 if ((port_mask & (1 << portid)) == 0)
3391 memset(&link, 0, sizeof(link));
3392 ret = rte_eth_link_get_nowait(portid, &link);
3395 if (print_flag == 1)
3397 "Port %u link get failed: %s\n",
3398 portid, rte_strerror(-ret));
3401 /* print link status if flag set */
3402 if (print_flag == 1) {
3403 rte_eth_link_to_str(link_status,
3404 sizeof(link_status), &link);
3405 printf("Port %d %s\n", portid, link_status);
3408 /* clear all_ports_up flag if any link down */
3409 if (link.link_status == ETH_LINK_DOWN) {
3414 /* after finally printing all link status, get out */
3415 if (print_flag == 1)
3418 if (all_ports_up == 0) {
3420 rte_delay_ms(CHECK_INTERVAL);
3423 /* set the print_flag if all ports up or timeout */
3424 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3434 rmv_port_callback(void *arg)
3436 int need_to_start = 0;
3437 int org_no_link_check = no_link_check;
3438 portid_t port_id = (intptr_t)arg;
3439 struct rte_eth_dev_info dev_info;
3442 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3444 if (!test_done && port_is_forwarding(port_id)) {
3446 stop_packet_forwarding();
3450 no_link_check = org_no_link_check;
3452 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3455 "Failed to get device info for port %d, not detaching\n",
3458 struct rte_device *device = dev_info.device;
3459 close_port(port_id);
3460 detach_device(device); /* might be already removed or have more ports */
3463 start_packet_forwarding(0);
3466 /* This function is used by the interrupt thread */
3468 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3471 RTE_SET_USED(param);
3472 RTE_SET_USED(ret_param);
3474 if (type >= RTE_ETH_EVENT_MAX) {
3476 "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3477 port_id, __func__, type);
3479 } else if (event_print_mask & (UINT32_C(1) << type)) {
3480 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3481 eth_event_desc[type]);
3486 case RTE_ETH_EVENT_NEW:
3487 ports[port_id].need_setup = 1;
3488 ports[port_id].port_status = RTE_PORT_HANDLING;
3490 case RTE_ETH_EVENT_INTR_RMV:
3491 if (port_id_is_invalid(port_id, DISABLED_WARN))
3493 if (rte_eal_alarm_set(100000,
3494 rmv_port_callback, (void *)(intptr_t)port_id))
3496 "Could not set up deferred device removal\n");
3498 case RTE_ETH_EVENT_DESTROY:
3499 ports[port_id].port_status = RTE_PORT_CLOSED;
3500 printf("Port %u is closed\n", port_id);
3509 register_eth_event_callback(void)
3512 enum rte_eth_event_type event;
3514 for (event = RTE_ETH_EVENT_UNKNOWN;
3515 event < RTE_ETH_EVENT_MAX; event++) {
3516 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3521 TESTPMD_LOG(ERR, "Failed to register callback for "
3522 "%s event\n", eth_event_desc[event]);
3530 /* This function is used by the interrupt thread */
3532 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3533 __rte_unused void *arg)
3538 if (type >= RTE_DEV_EVENT_MAX) {
3539 fprintf(stderr, "%s called upon invalid event %d\n",
3545 case RTE_DEV_EVENT_REMOVE:
3546 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3548 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3550 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
3555 * Because the user's callback is invoked from within the EAL
3556 * interrupt callback, the interrupt callback must return before
3557 * it can be unregistered while the device is being detached.
3558 * Therefore, return from this callback quickly and detach the
3559 * device through a deferred removal instead. This is a workaround;
3560 * once device detaching is moved into the EAL, it can be dropped.
3563 if (rte_eal_alarm_set(100000,
3564 rmv_port_callback, (void *)(intptr_t)port_id))
3566 "Could not set up deferred device removal\n");
3568 case RTE_DEV_EVENT_ADD:
3569 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3571 /* TODO: After finish kernel driver binding,
3572 * begin to attach port.
3581 rxtx_port_config(struct rte_port *port)
3586 for (qid = 0; qid < nb_rxq; qid++) {
3587 offloads = port->rx_conf[qid].offloads;
3588 port->rx_conf[qid] = port->dev_info.default_rxconf;
3590 port->rx_conf[qid].offloads = offloads;
3592 /* Check if any Rx parameters have been passed */
3593 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3594 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3596 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3597 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3599 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3600 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3602 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3603 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3605 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3606 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3608 port->nb_rx_desc[qid] = nb_rxd;
3611 for (qid = 0; qid < nb_txq; qid++) {
3612 offloads = port->tx_conf[qid].offloads;
3613 port->tx_conf[qid] = port->dev_info.default_txconf;
3615 port->tx_conf[qid].offloads = offloads;
3617 /* Check if any Tx parameters have been passed */
3618 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3619 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3621 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3622 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3624 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3625 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3627 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3628 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3630 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3631 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3633 port->nb_tx_desc[qid] = nb_txd;
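/*
 * Note (hedged): the RTE_PMD_PARAM_UNSET checks above let command-line
 * options such as --rxpt/--rxht/--rxwt, --rxfreet, --txpt/--txht/--txwt,
 * --txfreet and --txrst override the PMD defaults; anything left unset
 * keeps the values reported in dev_info.default_rxconf/default_txconf.
 */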
3638 * Helper function to keep the max_rx_pkt_len value and the JUMBO_FRAME
3639 * offload consistent; the MTU is also aligned if the JUMBO_FRAME offload is not set.
3641 * port->dev_info must be set before calling this function.
3643 * Returns 0 on success, negative on error.
3646 update_jumbo_frame_offload(portid_t portid)
3648 struct rte_port *port = &ports[portid];
3649 uint32_t eth_overhead;
3650 uint64_t rx_offloads;
3654 /* Derive the Ethernet overhead so that the default max_rx_pkt_len maps to an MTU of RTE_ETHER_MTU */
3655 if (port->dev_info.max_mtu != UINT16_MAX &&
3656 port->dev_info.max_rx_pktlen > port->dev_info.max_mtu)
3657 eth_overhead = port->dev_info.max_rx_pktlen -
3658 port->dev_info.max_mtu;
3660 eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3662 rx_offloads = port->dev_conf.rxmode.offloads;
3664 /* A config value of 0 (the default) means: use the PMD-specific overhead */
3665 if (port->dev_conf.rxmode.max_rx_pkt_len == 0)
3666 port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead;
3668 if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) {
3669 rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3672 if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
3674 "Frame size (%u) is not supported by port %u\n",
3675 port->dev_conf.rxmode.max_rx_pkt_len,
3679 rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
3683 if (rx_offloads != port->dev_conf.rxmode.offloads) {
3686 port->dev_conf.rxmode.offloads = rx_offloads;
3688 /* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
3689 for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
3691 port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
3693 port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3697 /* If JUMBO_FRAME is set, the MTU conversion is done by the ethdev
3698 * layer; if it is unset, do it here.
3700 if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
3701 ret = eth_dev_set_mtu_mp(portid,
3702 port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead);
3705 "Failed to set MTU to %u for port %u\n",
3706 port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead,
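/*
 * Worked example (illustrative): with the default overhead of
 * RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes, the default
 * max_rx_pkt_len becomes 1500 + 18 = 1518, i.e. an MTU of RTE_ETHER_MTU.
 * A max_rx_pkt_len of 9018 therefore requires DEV_RX_OFFLOAD_JUMBO_FRAME
 * and corresponds to an MTU of 9000.
 */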
3714 init_port_config(void)
3717 struct rte_port *port;
3720 RTE_ETH_FOREACH_DEV(pid) {
3722 port->dev_conf.fdir_conf = fdir_conf;
3724 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3729 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3730 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3731 rss_hf & port->dev_info.flow_type_rss_offloads;
3733 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3734 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3737 if (port->dcb_flag == 0) {
3738 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3739 port->dev_conf.rxmode.mq_mode =
3740 (enum rte_eth_rx_mq_mode)
3741 (rx_mq_mode & ETH_MQ_RX_RSS);
3743 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3746 rxtx_port_config(port);
3748 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3752 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3753 rte_pmd_ixgbe_bypass_init(pid);
3756 if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
3757 port->dev_conf.intr_conf.lsc = 1;
3758 if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3759 port->dev_conf.intr_conf.rmv = 1;
3763 void set_port_slave_flag(portid_t slave_pid)
3765 struct rte_port *port;
3767 port = &ports[slave_pid];
3768 port->slave_flag = 1;
3771 void clear_port_slave_flag(portid_t slave_pid)
3773 struct rte_port *port;
3775 port = &ports[slave_pid];
3776 port->slave_flag = 0;
3779 uint8_t port_is_bonding_slave(portid_t slave_pid)
3781 struct rte_port *port;
3782 struct rte_eth_dev_info dev_info;
3785 port = &ports[slave_pid];
3786 ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
3789 "Failed to get device info for port id %d,"
3790 "cannot determine if the port is a bonded slave",
3794 if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3799 const uint16_t vlan_tags[] = {
3800 0, 1, 2, 3, 4, 5, 6, 7,
3801 8, 9, 10, 11, 12, 13, 14, 15,
3802 16, 17, 18, 19, 20, 21, 22, 23,
3803 24, 25, 26, 27, 28, 29, 30, 31
3807 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3808 enum dcb_mode_enable dcb_mode,
3809 enum rte_eth_nb_tcs num_tcs,
3814 struct rte_eth_rss_conf rss_conf;
3817 * Builds up the correct configuration for DCB+VT based on the VLAN tags array
3818 * given above, and the number of traffic classes available for use.
3820 if (dcb_mode == DCB_VT_ENABLED) {
3821 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3822 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3823 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3824 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3826 /* VMDQ+DCB RX and TX configurations */
3827 vmdq_rx_conf->enable_default_pool = 0;
3828 vmdq_rx_conf->default_pool = 0;
3829 vmdq_rx_conf->nb_queue_pools =
3830 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3831 vmdq_tx_conf->nb_queue_pools =
3832 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3834 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3835 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3836 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3837 vmdq_rx_conf->pool_map[i].pools =
3838 1 << (i % vmdq_rx_conf->nb_queue_pools);
3840 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3841 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3842 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
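		/*
		 * Worked example (illustrative): with num_tcs == ETH_4_TCS
		 * there are 32 queue pools, so vlan_tags[5] == 5 maps to
		 * pool bitmask 1 << (5 % 32) == 0x20 (pool 5), and user
		 * priorities 0..7 map to TCs 0,1,2,3,0,1,2,3.
		 */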
3845 /* set DCB mode of RX and TX of multiple queues */
3846 eth_conf->rxmode.mq_mode =
3847 (enum rte_eth_rx_mq_mode)
3848 (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3849 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3851 struct rte_eth_dcb_rx_conf *rx_conf =
3852 &eth_conf->rx_adv_conf.dcb_rx_conf;
3853 struct rte_eth_dcb_tx_conf *tx_conf =
3854 &eth_conf->tx_adv_conf.dcb_tx_conf;
3856 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3858 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3862 rx_conf->nb_tcs = num_tcs;
3863 tx_conf->nb_tcs = num_tcs;
3865 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3866 rx_conf->dcb_tc[i] = i % num_tcs;
3867 tx_conf->dcb_tc[i] = i % num_tcs;
3870 eth_conf->rxmode.mq_mode =
3871 (enum rte_eth_rx_mq_mode)
3872 (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3873 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3874 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3878 eth_conf->dcb_capability_en =
3879 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3881 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3887 init_port_dcb_config(portid_t pid,
3888 enum dcb_mode_enable dcb_mode,
3889 enum rte_eth_nb_tcs num_tcs,
3892 struct rte_eth_conf port_conf;
3893 struct rte_port *rte_port;
3897 if (num_procs > 1) {
3898 printf("The multi-process feature doesn't support dcb.\n");
3901 rte_port = &ports[pid];
3903 /* retain the original device configuration. */
3904 memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
3906 /* Set the configuration of DCB in VT mode and DCB in non-VT mode. */
3907 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3910 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3912 /* Re-configure the device. */
3913 retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
3917 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3921 /* If dev_info.vmdq_pool_base is greater than 0,
3922 * the queue IDs of the VMDq pools start after the PF queues.
3924 if (dcb_mode == DCB_VT_ENABLED &&
3925 rte_port->dev_info.vmdq_pool_base > 0) {
3927 "VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
3932 /* Assume the ports in testpmd have the same DCB capability
3933 * and the same number of rxq and txq in DCB mode.
3935 if (dcb_mode == DCB_VT_ENABLED) {
3936 if (rte_port->dev_info.max_vfs > 0) {
3937 nb_rxq = rte_port->dev_info.nb_rx_queues;
3938 nb_txq = rte_port->dev_info.nb_tx_queues;
3940 nb_rxq = rte_port->dev_info.max_rx_queues;
3941 nb_txq = rte_port->dev_info.max_tx_queues;
3944 /* If VT is disabled, use all PF queues. */
3945 if (rte_port->dev_info.vmdq_pool_base == 0) {
3946 nb_rxq = rte_port->dev_info.max_rx_queues;
3947 nb_txq = rte_port->dev_info.max_tx_queues;
3949 nb_rxq = (queueid_t)num_tcs;
3950 nb_txq = (queueid_t)num_tcs;
3954 rx_free_thresh = 64;
3956 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3958 rxtx_port_config(rte_port);
3960 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3961 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3962 rx_vft_set(pid, vlan_tags[i], 1);
3964 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3968 rte_port->dcb_flag = 1;
3970 /* Enter DCB configuration status */
3981 /* Configuration of Ethernet ports. */
3982 ports = rte_zmalloc("testpmd: ports",
3983 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3984 RTE_CACHE_LINE_SIZE);
3985 if (ports == NULL) {
3986 rte_exit(EXIT_FAILURE,
3987 "rte_zmalloc(%d struct rte_port) failed\n",
3990 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3991 ports[i].xstats_info.allocated = false;
3992 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3993 LIST_INIT(&ports[i].flow_tunnel_list);
3994 /* Initialize ports NUMA structures */
3995 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3996 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3997 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4011 const char clr[] = { 27, '[', '2', 'J', '\0' };
4012 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4014 /* Clear the screen and home the cursor (ANSI ESC[2J and ESC[1;1H) */
4015 printf("%s%s", clr, top_left);
4017 printf("\nPort statistics ====================================");
4018 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4019 nic_stats_display(fwd_ports_ids[i]);
4025 signal_handler(int signum)
4027 if (signum == SIGINT || signum == SIGTERM) {
4028 fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4030 #ifdef RTE_LIB_PDUMP
4031 /* uninitialize packet capture framework */
4034 #ifdef RTE_LIB_LATENCYSTATS
4035 if (latencystats_enabled != 0)
4036 rte_latencystats_uninit();
4039 /* Set flag to indicate the force termination. */
4041 /* exit with the expected status */
4042 #ifndef RTE_EXEC_ENV_WINDOWS
4043 signal(signum, SIG_DFL);
4044 kill(getpid(), signum);
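		/*
		 * Note: restoring SIG_DFL and re-raising the signal makes
		 * the process terminate with the conventional
		 * killed-by-signal status, so a parent shell still observes
		 * SIGINT/SIGTERM rather than a plain exit code.
		 */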
4050 main(int argc, char** argv)
4057 signal(SIGINT, signal_handler);
4058 signal(SIGTERM, signal_handler);
4060 testpmd_logtype = rte_log_register("testpmd");
4061 if (testpmd_logtype < 0)
4062 rte_exit(EXIT_FAILURE, "Cannot register log type");
4063 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4065 diag = rte_eal_init(argc, argv);
4067 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
4068 rte_strerror(rte_errno));
4070 ret = register_eth_event_callback();
4072 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
4074 #ifdef RTE_LIB_PDUMP
4075 /* initialize packet capture framework */
4080 RTE_ETH_FOREACH_DEV(port_id) {
4081 ports_ids[count] = port_id;
4084 nb_ports = (portid_t) count;
4086 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
4088 /* allocate port structures, and init them */
4091 set_def_fwd_config();
4093 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
4094 "Check the core mask argument\n");
4096 /* Bitrate/latency stats disabled by default */
4097 #ifdef RTE_LIB_BITRATESTATS
4098 bitrate_enabled = 0;
4100 #ifdef RTE_LIB_LATENCYSTATS
4101 latencystats_enabled = 0;
4104 /* on FreeBSD, mlockall() is disabled by default */
4105 #ifdef RTE_EXEC_ENV_FREEBSD
4114 launch_args_parse(argc, argv);
4116 #ifndef RTE_EXEC_ENV_WINDOWS
4117 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4118 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
4123 if (tx_first && interactive)
4124 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
4125 "interactive mode.\n");
4127 if (tx_first && lsc_interrupt) {
4129 "Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
4133 if (!nb_rxq && !nb_txq)
4135 "Warning: Either rx or tx queues should be non-zero\n");
4137 if (nb_rxq > 1 && nb_rxq > nb_txq)
4139 "Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
4145 ret = rte_dev_hotplug_handle_enable();
4148 "fail to enable hotplug handling.");
4152 ret = rte_dev_event_monitor_start();
4155 "fail to start device event monitoring.");
4159 ret = rte_dev_event_callback_register(NULL,
4160 dev_event_callback, NULL);
4163 "fail to register device event callback\n");
4168 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
4169 rte_exit(EXIT_FAILURE, "Start ports failed\n");
4171 /* set all ports to promiscuous mode by default */
4172 RTE_ETH_FOREACH_DEV(port_id) {
4173 ret = rte_eth_promiscuous_enable(port_id);
4176 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
4177 port_id, rte_strerror(-ret));
4180 /* Init metrics library */
4181 rte_metrics_init(rte_socket_id());
4183 #ifdef RTE_LIB_LATENCYSTATS
4184 if (latencystats_enabled != 0) {
4185 int ret = rte_latencystats_init(1, NULL);
4188 "Warning: latencystats init() returned error %d\n",
4190 fprintf(stderr, "Latencystats running on lcore %d\n",
4191 latencystats_lcore_id);
4195 /* Setup bitrate stats */
4196 #ifdef RTE_LIB_BITRATESTATS
4197 if (bitrate_enabled != 0) {
4198 bitrate_data = rte_stats_bitrate_create();
4199 if (bitrate_data == NULL)
4200 rte_exit(EXIT_FAILURE,
4201 "Could not allocate bitrate data.\n");
4202 rte_stats_bitrate_reg(bitrate_data);
4206 #ifdef RTE_LIB_CMDLINE
4207 if (strlen(cmdline_filename) != 0)
4208 cmdline_read_from_file(cmdline_filename);
4210 if (interactive == 1) {
4212 printf("Start automatic packet forwarding\n");
4213 start_packet_forwarding(0);
4225 printf("No commandline core given, start packet forwarding\n");
4226 start_packet_forwarding(tx_first);
4227 if (stats_period != 0) {
4228 uint64_t prev_time = 0, cur_time, diff_time = 0;
4229 uint64_t timer_period;
4231 /* Convert to number of cycles */
4232 timer_period = stats_period * rte_get_timer_hz();
4234 while (f_quit == 0) {
4235 cur_time = rte_get_timer_cycles();
4236 diff_time += cur_time - prev_time;
4238 if (diff_time >= timer_period) {
4240 /* Reset the timer */
4243 /* Sleep to avoid unnecessary checks */
4244 prev_time = cur_time;
4245 rte_delay_us_sleep(US_PER_S);
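			/*
			 * Illustrative note: rte_get_timer_hz() is the timer
			 * frequency in cycles per second, so stats_period
			 * seconds equals stats_period * hz cycles, re-checked
			 * about once per second due to the sleep above.
			 */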
4249 printf("Press enter to exit\n");
4250 rc = read(0, &c, 1);
4256 ret = rte_eal_cleanup();
4258 rte_exit(EXIT_FAILURE,
4259 "EAL cleanup failed: %s\n", strerror(-ret));
4261 return EXIT_SUCCESS;