/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_pmd_ixgbe.h>
#include <rte_pdump.h>

#ifdef RTE_LIB_METRICS
#include <rte_metrics.h>
#endif
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif

#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif

#define EXTMEM_HEAP_NAME "extmem"
/*
 * Zone size with the malloc overhead (max of debug and release variants)
 * must fit into the smallest supported hugepage size (2M),
 * so that an IOVA-contiguous zone of this size can always be allocated
 * if there are free 2M hugepages.
 */
#define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
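/*
 * Illustrative compile-time sanity check (an addition for clarity, assuming
 * a C11 toolchain; not present in the original file): the zone, including
 * the allocator overhead budgeted above, must fit in one 2M hugepage.
 */
_Static_assert(EXTBUF_ZONE_SIZE < RTE_PGSIZE_2M,
	       "EXTBUF_ZONE_SIZE must fit in a single 2M hugepage");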
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use main core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
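/*
 * Example (illustrative, assuming the standard testpmd --mp-alloc option):
 * selecting the anonymous-memory population mode from the command line:
 *
 *   dpdk-testpmd -l 0-3 -n 4 -- -i --mp-alloc=anon
 */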
/*
 * Store the specified sockets on which the memory pools used by the ports
 * are to be created.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the RX rings of the ports
 * are to be created.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the specified sockets on which the TX rings of the ports
 * are to be created.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic
 * generator ports.
 */
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
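/*
 * Example (illustrative, assuming the standard --mbuf-size option): two
 * mbuf data sizes for buffer split, filling mbuf_data_size[0] and [1]:
 *
 *   dpdk-testpmd -l 0-3 -n 4 -- -i --mbuf-size=2048,4096
 */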
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
                                     * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;

unsigned int xstats_display_num; /**< Size of extended statistics to show */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated from outside. Set this flag to exit the stats-period loop
 * after a SIGINT/SIGTERM has been received.
 */

/*
 * Max Rx frame size, set by '--max-pkt-len' parameter.
 */
uint32_t max_rx_pkt_len;

/*
 * Configuration of packet segments used to scatter received packets
 * if any of the split features is configured.
 */
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */

uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */

uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1;  /**< Number of RX queues per port. */
queueid_t nb_txq = 1;  /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of the number of packets buffered before sending.
 */
uint16_t noisy_tx_sw_bufsz;

/*
 * Configurable value of packet buffer timeout.
 */
uint16_t noisy_tx_sw_buf_flush_time;

/*
 * Configurable value for the size of the VNF internal memory area
 * used for simulating noisy neighbour behaviour.
 */
uint64_t noisy_lkup_mem_sz;

/*
 * Configurable value of the number of random writes done in the
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_writes;

/*
 * Configurable value of the number of random reads done in the
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads;

/*
 * Configurable value of the number of random reads/writes done in the
 * VNF simulation memory area.
 */
uint64_t noisy_lkup_num_reads_writes;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
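/*
 * Illustrative override (a sketch, not the shipped default): the hash can
 * be widened to also cover L4 ports, e.g.:
 *
 *   rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP;
 */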
/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping ports.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Don't automatically start all ports in interactive mode.
 */
uint8_t no_device_start = 0;

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;

/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;

/* Hairpin ports configuration mode. */
uint16_t hairpin_mode;

/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
};

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
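/*
 * Illustrative helper (a hypothetical addition, not part of the original
 * file): test whether a given ethdev event type is selected for display
 * in event_print_mask.
 */
static __rte_unused bool
eth_event_is_printed(enum rte_eth_event_type type)
{
	return (event_print_mask & (UINT32_C(1) << type)) != 0;
}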
/*
 * Decide whether all memory is locked for performance.
 */

/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIB_LATENCYSTATS
/*
 * Set when latency stats is enabled in the commandline.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;
#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_eth_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/*
 * Display zero values by default for xstats.
 */
uint8_t xstats_hide_zero;

/*
 * Measure of CPU cycles disabled by default.
 */
uint8_t record_core_cycles;

/*
 * Display of RX and TX bursts disabled by default.
 */
uint8_t record_burst_stats;

/*
 * Number of ports per shared Rx queue group; 0 disables the feature.
 */

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/*
 * Hexadecimal bitmask of the RX mq modes that can be enabled.
 */
enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;

/*
 * Used to set forced link speed.
 */
uint32_t eth_link_speed;

/*
 * ID of the current process in multi-process, used to
 * configure the queues to be polled.
 */

/*
 * Number of processes in multi-process, used to
 * configure the queues to be polled.
 */
unsigned int num_procs = 1;
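/*
 * Example (illustrative; assumes the standard --num-procs/--proc-id
 * options of multi-process testpmd): two processes sharing the ports,
 * each polling its own subset of the queues:
 *
 *   dpdk-testpmd -l 0-1 --proc-type=auto -- --num-procs=2 --proc-id=0
 *   dpdk-testpmd -l 2-3 --proc-type=auto -- --num-procs=2 --proc-id=1
 */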
static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
	uint64_t rx_meta_features = 0;
	int ret;

	if (!is_proc_primary())
		return;

	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;

	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
	if (ret == 0) {
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
				    port_id);
		}

		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
				    port_id);
		}
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
	}
}

static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		     const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					     dev_conf);
	return 0;
}

static int
eth_dev_start_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_start(port_id);
	return 0;
}

static int
eth_dev_stop_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_stop(port_id);
	return 0;
}

static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}

static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);
	return 0;
}
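/*
 * Illustrative sketch (a hypothetical helper, not in the original file):
 * the same primary-only wrapper pattern used above can guard any other
 * port-wide mutator; secondary processes simply report success.
 */
static __rte_unused int
eth_promiscuous_enable_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_promiscuous_enable(port_id);
	return 0;
}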
/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
			       void *param);
static void fill_xstats_display_info(void);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];

/*
 * Helper function to check whether a socket has already been discovered.
 * Return a positive value if the socket id is new, zero if it has already
 * been seen.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc = 0;
	unsigned int sock_num;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_main_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}

static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = rte_log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}
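/*
 * Illustrative usage (an assumption for clarity): combining HUGE_FLAG with
 * the shifted page-size flags computed above to reserve one anonymous 2M
 * hugepage, as alloc_mem() below does for arbitrary sizes:
 *
 *   void *p = mmap(NULL, RTE_PGSIZE_2M, PROT_READ | PROT_WRITE,
 *                  MAP_ANONYMOUS | MAP_PRIVATE | HUGE_FLAG |
 *                  pagesz_flags(RTE_PGSIZE_2M), -1, 0);
 */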
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	      bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			      RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr = NULL;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);
		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	free(iovas);
	if (addr != NULL)
		munmap(addr, mem_sz);

	return -1;
}
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
					 param.addr, param.len, param.iova_table,
					 param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
		    param.len >> 20);

	return 0;
}
static void
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool unmapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}

		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA unmap addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
	if (ret != 0)
		TESTPMD_LOG(DEBUG,
			    "unable to un-register addr 0x%p\n", memhdr->addr);
}

static void
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
{
	uint16_t pid = 0;
	size_t page_size = sysconf(_SC_PAGESIZE);
	int ret;

	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
				  page_size);
	if (ret != 0) {
		TESTPMD_LOG(DEBUG,
			    "unable to register addr 0x%p\n", memhdr->addr);
		return;
	}
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = eth_dev_info_get_print_err(pid, &dev_info);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to get device info for port %d on addr 0x%p, "
				    "mempool mapping will not be performed\n",
				    pid, memhdr->addr);
			continue;
		}
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
		if (ret != 0) {
			TESTPMD_LOG(DEBUG,
				    "unable to DMA map addr 0x%p "
				    "for device %s\n",
				    memhdr->addr, dev_info.device->name);
		}
	}
}
static unsigned int
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
{
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	uint16_t elt_size;

	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;

	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
	if (xmem == NULL) {
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
				 "external buffer descriptors\n");
		*ext_mem = NULL;
		return 0;
	}
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		int ret;

		ret = snprintf(mz_name, sizeof(mz_name),
			       RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
			ext_num = 0;
			break;
		}
		mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
					 socket_id,
					 RTE_MEMZONE_IOVA_CONTIG |
					 RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			/*
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
			 */
			errno = ENOMEM;
			ext_num = 0;
			break;
		}
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	}
	if (ext_num == 0 && xmem != NULL) {
		free(xmem);
		xmem = NULL;
	}
	*ext_mem = xmem;
	return ext_num;
}
/*
 * Configuration initialisation done once at init time.
 */
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
#ifndef RTE_EXEC_ENV_WINDOWS
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
	if (!is_proc_primary()) {
		rte_mp = rte_mempool_lookup(pool_name);
		if (rte_mp == NULL)
			rte_exit(EXIT_FAILURE,
				 "Get mbuf pool for socket %u failed: %s\n",
				 socket_id, rte_strerror(rte_errno));
		return rte_mp;
	}

	TESTPMD_LOG(INFO,
		    "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		    pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				    rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
#ifndef RTE_EXEC_ENV_WINDOWS
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, mempool_flags);
			if (rte_mp == NULL)
				break;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				    rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
#endif
	case MP_ALLOC_XBUF:
		{
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;

			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
			if (ext_num == 0)
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				    rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
					 ext_mem, ext_num);
			free(ext_mem);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

#ifndef RTE_EXEC_ENV_WINDOWS
err:
#endif
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			 "Creation of mbuf pool for socket %u failed: %s\n",
			 socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
	return rte_mp;
}
/*
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			fprintf(stderr,
				"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues in all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return max_rxq_valid ? allowed_max_rxq : 0;
}

/*
 * Check whether the input rxq is valid.
 * The value is valid if it does not exceed the maximum number of
 * RX queues of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		fprintf(stderr,
			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
			rxq, allowed_max_rxq, pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues in all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return max_txq_valid ? allowed_max_txq : 0;
}

/*
 * Check whether the input txq is valid.
 * The value is valid if it does not exceed the maximum number of
 * TX queues of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		fprintf(stderr,
			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
			txq, allowed_max_txq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RXDs for every RX queue.
 * *pid returns the port id which has the minimal value of
 * max_rxd in all queues of all ports.
 */
static uint16_t
get_allowed_max_nb_rxd(portid_t *pid)
{
	uint16_t allowed_max_rxd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_rxd;
}

/*
 * Get the allowed minimal number of RXDs for every RX queue.
 * *pid returns the port id whose queues impose this limit.
 */
static uint16_t
get_allowed_min_nb_rxd(portid_t *pid)
{
	uint16_t allowed_min_rxd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_rxd;
}

/*
 * Check whether the input rxd is valid.
 * The value is valid if it does not exceed the maximum number of RXDs
 * of any RX queue and is not less than the minimal number of RXDs of
 * any RX queue.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxd(queueid_t rxd)
{
	uint16_t allowed_max_rxd;
	uint16_t allowed_min_rxd;
	portid_t pid = 0;

	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
	if (rxd > allowed_max_rxd) {
		fprintf(stderr,
			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
			rxd, allowed_max_rxd, pid);
		return -1;
	}

	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
	if (rxd < allowed_min_rxd) {
		fprintf(stderr,
			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
			rxd, allowed_min_rxd, pid);
		return -1;
	}

	return 0;
}

/*
 * Get the allowed maximum number of TXDs for every TX queue.
 * *pid returns the port id which has the minimal value of
 * max_txd in every TX queue.
 */
static uint16_t
get_allowed_max_nb_txd(portid_t *pid)
{
	uint16_t allowed_max_txd = UINT16_MAX;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
			*pid = pi;
		}
	}
	return allowed_max_txd;
}

/*
 * Get the allowed minimal number of TXDs for every TX queue.
 * *pid returns the port id whose queues impose this limit.
 */
static uint16_t
get_allowed_min_nb_txd(portid_t *pid)
{
	uint16_t allowed_min_txd = 0;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
			continue;

		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
			*pid = pi;
		}
	}

	return allowed_min_txd;
}

/*
 * Check whether the input txd is valid.
 * The value is valid if it does not exceed the maximum number of TXDs
 * of any TX queue and is not less than the minimal number of TXDs of
 * any TX queue.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_txd(queueid_t txd)
{
	uint16_t allowed_max_txd;
	uint16_t allowed_min_txd;
	portid_t pid = 0;

	allowed_max_txd = get_allowed_max_nb_txd(&pid);
	if (txd > allowed_max_txd) {
		fprintf(stderr,
			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
			txd, allowed_max_txd, pid);
		return -1;
	}

	allowed_min_txd = get_allowed_min_nb_txd(&pid);
	if (txd < allowed_min_txd) {
		fprintf(stderr,
			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
			txd, allowed_min_txd, pid);
		return -1;
	}

	return 0;
}
/*
 * Get the allowed maximum number of hairpin queues.
 * *pid returns the port id which has the minimal value of
 * max_hairpin_queues in all ports.
 */
queueid_t
get_allowed_max_nb_hairpinq(portid_t *pid)
{
	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
	portid_t pi;
	struct rte_eth_hairpin_cap cap;

	RTE_ETH_FOREACH_DEV(pi) {
		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
			*pid = pi;
			return 0;
		}
		if (cap.max_nb_queues < allowed_max_hairpinq) {
			allowed_max_hairpinq = cap.max_nb_queues;
			*pid = pi;
		}
	}
	return allowed_max_hairpinq;
}

/*
 * Check whether the input number of hairpin queues is valid.
 * The value is valid if it does not exceed the maximum number of
 * hairpin queues of any port.
 * Return 0 if valid, -1 otherwise.
 */
int
check_nb_hairpinq(queueid_t hairpinq)
{
	queueid_t allowed_max_hairpinq;
	portid_t pid = 0;

	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
	if (hairpinq > allowed_max_hairpinq) {
		fprintf(stderr,
			"Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
			hairpinq, allowed_max_hairpinq, pid);
		return -1;
	}
	return 0;
}

static uint32_t
get_eth_overhead(struct rte_eth_dev_info *dev_info)
{
	uint32_t eth_overhead;

	if (dev_info->max_mtu != UINT16_MAX &&
	    dev_info->max_rx_pktlen > dev_info->max_mtu)
		eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
	else
		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return eth_overhead;
}
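/*
 * Worked example (illustrative): a NIC reporting max_rx_pktlen = 1518 and
 * max_mtu = 1500 yields an overhead of 18 bytes, which matches the
 * RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) fallback.
 */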
static void
init_config_port_offloads(portid_t pid, uint32_t socket_id)
{
	struct rte_port *port = &ports[pid];
	int ret;
	int i;

	eth_rx_metadata_negotiate_mp(pid);

	port->dev_conf.txmode = tx_mode;
	port->dev_conf.rxmode = rx_mode;

	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");

	if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
		port->dev_conf.txmode.offloads &=
			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Apply Rx offloads configuration */
	for (i = 0; i < port->dev_info.max_rx_queues; i++)
		port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
	/* Apply Tx offloads configuration */
	for (i = 0; i < port->dev_info.max_tx_queues; i++)
		port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;

	if (eth_link_speed)
		port->dev_conf.link_speeds = eth_link_speed;

	if (max_rx_pkt_len)
		port->dev_conf.rxmode.mtu = max_rx_pkt_len -
			get_eth_overhead(&port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
	port->tx_metadata = 0;

	/*
	 * Check for maximum number of segments per MTU.
	 * Accordingly update the mbuf data size.
	 */
	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
		uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
		uint16_t mtu;

		if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
			uint16_t data_size = (mtu + eth_overhead) /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
			uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;

			if (buffer_size > mbuf_data_size[0]) {
				mbuf_data_size[0] = buffer_size;
				TESTPMD_LOG(WARNING,
					    "Configured mbuf size of the first segment %hu\n",
					    mbuf_data_size[0]);
			}
		}
	}
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
					"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
						"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		uint32_t socket_id;

		if (numa_support) {
			socket_id = port_numa[pid];
			if (port_numa[pid] == NUMA_NO_CONFIG) {
				socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
			}
		} else {
			socket_id = (socket_num == UMA_NO_CONFIG) ?
				    0 : socket_num;
		}
		/* Apply default TxRx configuration for all ports */
		init_config_port_offloads(pid, socket_id);
	}
	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i, j;

		for (i = 0; i < num_sockets; i++)
			for (j = 0; j < mbuf_data_size_n; j++)
				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
					mbuf_pool_create(mbuf_data_size[j],
							 nb_mbuf_per_pool,
							 socket_ids[i], j);
	} else {
		uint8_t i;

		for (i = 0; i < mbuf_data_size_n; i++)
			mempools[i] = mbuf_pool_create
					(mbuf_data_size[i],
					 nb_mbuf_per_pool,
					 socket_num == UMA_NO_CONFIG ?
					 0 : socket_num, i);
	}

	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);

		if (mbp == NULL)
			mbp = mbuf_pool_find(0, 0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
			RTE_ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
			fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
				 "rte_gro_ctx_create() failed\n");
		}
	}
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	/* Reconfiguration of Ethernet ports. */
	init_config_port_offloads(new_port_id, socket_id);
}

int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			fprintf(stderr,
				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
				nb_rxq, port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			fprintf(stderr,
				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
				nb_txq, port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		fprintf(stderr,
			"Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;

	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
					  sizeof(struct fwd_stream *) * nb_fwd_streams,
					  RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	uint64_t total_burst, sburst;
	uint64_t nb_burst;
	uint64_t burst_stats[4];
	uint16_t pktnb_stats[4];
	uint16_t nb_pkt;
	int burst_percent[4], sburstp;
	int i;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;

		total_burst += nb_burst;

		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;

	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		if (i == 3) {
			printf("%d%% of other]\n", 100 - sburstp);
			return;
		}

		sburst += burst_stats[i];
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
			       100 - sburstp, (int) pktnb_stats[i]);
			return;
		}

		burst_percent[i] =
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
		       burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
	}
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
	       " TX-dropped: %-14"PRIu64,
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
		       " Rx- bad L4 checksum: %-14"PRIu64
		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);
		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
		       fs->rx_bad_outer_ip_csum);
	}

	if (record_burst_stats) {
		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
	}
}
void
fwd_stats_display(void)
{
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	struct {
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
		uint64_t rx_bad_outer_ip_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
	uint64_t fwd_cycles = 0;
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	memset(ports_stats, 0, sizeof(ports_stats));

	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
		} else {
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		}

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
			fs->rx_bad_outer_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
			fs->rx_bad_outer_ip_csum;

		if (record_core_cycles)
			fwd_cycles += fs->core_cycles;
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf += stats.rx_nombuf;

		printf("\n  %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);

		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
		       stats.ipackets + stats.imissed);

		if (cur_fwd_eng == &csum_fwd_engine) {
			printf("  Bad-ipcsum: %-14"PRIu64
			       " Bad-l4csum: %-14"PRIu64
			       "Bad-outer-l4csum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_ip_csum,
			       ports_stats[pt_id].rx_bad_l4_csum,
			       ports_stats[pt_id].rx_bad_outer_l4_csum);
			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_outer_ip_csum);
		}
		if (stats.ierrors + stats.rx_nombuf > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats.opackets, ports_stats[pt_id].tx_dropped,
		       stats.opackets + ports_stats[pt_id].tx_dropped);

		if (record_burst_stats) {
			if (ports_stats[pt_id].rx_stream)
				pkt_burst_stats_display("RX",
					&ports_stats[pt_id].rx_stream->rx_burst_stats);
			if (ports_stats[pt_id].tx_stream)
				pkt_burst_stats_display("TX",
					&ports_stats[pt_id].tx_stream->tx_burst_stats);
		}

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	if (record_core_cycles) {
#define CYC_PER_MHZ 1E6
		if (total_recv > 0 || total_xmit > 0) {
			uint64_t total_pkts = 0;
			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
				total_pkts = total_xmit;
			else
				total_pkts = total_recv;

			printf("\n  CPU cycles/packet=%.2F (total cycles="
			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
			       " MHz Clock\n",
			       (double) fwd_cycles / total_pkts,
			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
		}
	}
}
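/*
 * Worked example for the cycles/packet report above (illustrative):
 * fwd_cycles = 2e9 over total_pkts = 1e8 prints
 * "CPU cycles/packet=20.00 ... at 2300 MHz Clock" on a 2.3 GHz TSC.
 */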
void
fwd_stats_reset(void)
{
	streamid_t sm_id;
	portid_t pt_id;
	int i;

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		fs->rx_packets = 0;
		fs->tx_packets = 0;
		fs->fwd_dropped = 0;
		fs->rx_bad_ip_csum = 0;
		fs->rx_bad_l4_csum = 0;
		fs->rx_bad_outer_l4_csum = 0;
		fs->rx_bad_outer_ip_csum = 0;

		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
		fs->core_cycles = 0;
	}
}

static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	if (num_procs > 1) {
		printf("multi-process does not support flushing the fwd Rx queues; returning without flushing.\n");
		return;
	}

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];

				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * a non-zero number of packets, so a timer is
				 * used to exit the loop after a 1-second
				 * timeout.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIB_BITRATESTATS
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIB_BITRATESTATS
		if (bitrate_enabled != 0 &&
		    bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
							       ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0 &&
		    latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	unsigned int i;
	unsigned int lc_id;
	int diag;

	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				fprintf(stderr,
					"launch lcore %u failed - diag=%d\n",
					lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	unsigned int i;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq are 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		fprintf(stderr, "Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		fprintf(stderr, "Packet forwarding already started\n");
		return;
	}

	pkt_fwd_config_display(&cur_fwd_config);
	if (!pkt_fwd_shared_rxq_check())
		return;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			if (port_fwd_begin(fwd_ports_ids[i])) {
				fprintf(stderr,
					"Packet forwarding is not ready\n");
				return;
			}
		}
	}

	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
				if (port_fwd_begin(fwd_ports_ids[i])) {
					fprintf(stderr,
						"Packet forwarding is not ready\n");
					return;
				}
			}
		}
	}

	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	rxtx_config_display();

	fwd_stats_reset();
	if (with_tx_first) {
		while (with_tx_first--) {
			launch_packet_forwarding(
				run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	port_fwd_end_t port_fwd_end;
	lcoreid_t lc_id;
	portid_t pt_id;
	int i;

	if (test_done) {
		fprintf(stderr, "Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}

	fwd_stats_display();

	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		fprintf(stderr, "\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		fprintf(stderr, "\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
port_is_stopped(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];

	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
		return 0;
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;

	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}
2463 /* Configure the Rx and Tx hairpin queues for the selected port. */
2465 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2468 struct rte_eth_hairpin_conf hairpin_conf = {
2473 struct rte_port *port = &ports[pi];
2474 uint16_t peer_rx_port = pi;
2475 uint16_t peer_tx_port = pi;
2476 uint32_t manual = 1;
2477 uint32_t tx_exp = hairpin_mode & 0x10;
2479 if (!(hairpin_mode & 0xf)) {
2483 } else if (hairpin_mode & 0x1) {
2484 peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2485 RTE_ETH_DEV_NO_OWNER);
2486 if (peer_tx_port >= RTE_MAX_ETHPORTS)
2487 peer_tx_port = rte_eth_find_next_owned_by(0,
2488 RTE_ETH_DEV_NO_OWNER);
2489 if (p_pi != RTE_MAX_ETHPORTS) {
2490 peer_rx_port = p_pi;
2494 /* Last port will be the peer RX port of the first. */
2495 RTE_ETH_FOREACH_DEV(next_pi)
2496 peer_rx_port = next_pi;
2499 } else if (hairpin_mode & 0x2) {
2501 peer_rx_port = p_pi;
2503 peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2504 RTE_ETH_DEV_NO_OWNER);
2505 if (peer_rx_port >= RTE_MAX_ETHPORTS)
2508 peer_tx_port = peer_rx_port;
2512 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2513 hairpin_conf.peers[0].port = peer_rx_port;
2514 hairpin_conf.peers[0].queue = i + nb_rxq;
2515 hairpin_conf.manual_bind = !!manual;
2516 hairpin_conf.tx_explicit = !!tx_exp;
2517 diag = rte_eth_tx_hairpin_queue_setup
2518 (pi, qi, nb_txd, &hairpin_conf);
2523 /* Failed to set up a Tx hairpin queue; revert port status and return */
2524 if (port->port_status == RTE_PORT_HANDLING)
2525 port->port_status = RTE_PORT_STOPPED;
2528 "Port %d cannot be set back to stopped\n", pi);
2529 fprintf(stderr, "Failed to configure port %d hairpin queues\n",
2531 /* try to reconfigure queues next time */
2532 port->need_reconfig_queues = 1;
2535 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2536 hairpin_conf.peers[0].port = peer_tx_port;
2537 hairpin_conf.peers[0].queue = i + nb_txq;
2538 hairpin_conf.manual_bind = !!manual;
2539 hairpin_conf.tx_explicit = !!tx_exp;
2540 diag = rte_eth_rx_hairpin_queue_setup
2541 (pi, qi, nb_rxd, &hairpin_conf);
2546 /* Failed to set up an Rx hairpin queue; revert port status and return */
2547 if (port->port_status == RTE_PORT_HANDLING)
2548 port->port_status = RTE_PORT_STOPPED;
2551 "Port %d cannot be set back to stopped\n", pi);
2552 fprintf(stderr, "Failed to configure port %d hairpin queues\n",
2554 /* try to reconfigure queues next time */
2555 port->need_reconfig_queues = 1;
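/*
 * Editor's sketch (not part of testpmd): one way to decode the
 * hairpin_mode bitmask, with the bit meanings inferred from the branches
 * of setup_hairpin_queues() above. The helper name is hypothetical.
 */
static inline void
describe_hairpin_mode_sketch(uint32_t mode)
{
	if (!(mode & 0xf))
		printf("hairpin: each port binds to itself\n");
	else if (mode & 0x1)
		printf("hairpin: Tx chained to the next port, looping back\n");
	else if (mode & 0x2)
		printf("hairpin: adjacent ports are paired\n");
	if (mode & 0x10)
		printf("hairpin: Tx flow rules must be inserted explicitly\n");
}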
2561 /* Configure the Rx with optional split. */
2563 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2564 uint16_t nb_rx_desc, unsigned int socket_id,
2565 struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2567 union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2568 unsigned int i, mp_n;
2571 if (rx_pkt_nb_segs <= 1 ||
2572 (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2573 rx_conf->rx_seg = NULL;
2574 rx_conf->rx_nseg = 0;
2575 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2576 nb_rx_desc, socket_id,
2580 for (i = 0; i < rx_pkt_nb_segs; i++) {
2581 struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2582 struct rte_mempool *mpx;
2584 * Use the last valid pool for segments whose index
2585 * exceeds the number of configured mempools.
2587 mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2588 mpx = mbuf_pool_find(socket_id, mp_n);
2589 /* A zero length means: use the mbuf data buffer size. */
2590 rx_seg->length = rx_pkt_seg_lengths[i] ?
2591 rx_pkt_seg_lengths[i] :
2592 mbuf_data_size[mp_n];
2593 rx_seg->offset = i < rx_pkt_nb_offs ?
2594 rx_pkt_seg_offsets[i] : 0;
2595 rx_seg->mp = mpx ? mpx : mp;
2597 rx_conf->rx_nseg = rx_pkt_nb_segs;
2598 rx_conf->rx_seg = rx_useg;
2599 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2600 socket_id, rx_conf, NULL);
2601 rx_conf->rx_seg = NULL;
2602 rx_conf->rx_nseg = 0;
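/*
 * Editor's sketch (not part of testpmd): a minimal two-segment buffer
 * split setup, assuming the device advertises
 * RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT and that the two mempools passed in
 * already exist. Function and parameter names are hypothetical.
 */
static int
rx_split_setup_sketch(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc,
		      unsigned int socket_id, struct rte_eth_rxconf *rxconf,
		      struct rte_mempool *hdr_mp, struct rte_mempool *data_mp)
{
	union rte_eth_rxseg segs[2] = {};
	int ret;

	segs[0].split.mp = hdr_mp;	/* first 128 bytes land here */
	segs[0].split.length = 128;
	segs[1].split.mp = data_mp;	/* remainder of the packet */
	segs[1].split.length = 0;	/* 0: use the pool's data room size */

	rxconf->offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	rxconf->rx_seg = segs;
	rxconf->rx_nseg = 2;
	/* mb_pool must be NULL when rx_seg/rx_nseg describe the buffers */
	ret = rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket_id,
				     rxconf, NULL);
	rxconf->rx_seg = NULL;
	rxconf->rx_nseg = 0;
	return ret;
}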
2607 alloc_xstats_display_info(portid_t pi)
2609 uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
2610 uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
2611 uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
2613 if (xstats_display_num == 0)
2616 *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
2617 if (*ids_supp == NULL)
2620 *prev_values = calloc(xstats_display_num,
2621 sizeof(**prev_values));
2622 if (*prev_values == NULL)
2623 goto fail_prev_values;
2625 *curr_values = calloc(xstats_display_num,
2626 sizeof(**curr_values));
2627 if (*curr_values == NULL)
2628 goto fail_curr_values;
2630 ports[pi].xstats_info.allocated = true;
2643 free_xstats_display_info(portid_t pi)
2645 if (!ports[pi].xstats_info.allocated)
2647 free(ports[pi].xstats_info.ids_supp);
2648 free(ports[pi].xstats_info.prev_values);
2649 free(ports[pi].xstats_info.curr_values);
2650 ports[pi].xstats_info.allocated = false;
2653 /** Fill helper structures for specified port to show extended statistics. */
2655 fill_xstats_display_info_for_port(portid_t pi)
2657 unsigned int stat, stat_supp;
2658 const char *xstat_name;
2659 struct rte_port *port;
2663 if (xstats_display_num == 0)
2666 if (pi == (portid_t)RTE_PORT_ALL) {
2667 fill_xstats_display_info();
2672 if (port->port_status != RTE_PORT_STARTED)
2675 if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
2676 rte_exit(EXIT_FAILURE,
2677 "Failed to allocate xstats display memory\n");
2679 ids_supp = port->xstats_info.ids_supp;
2680 for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
2681 xstat_name = xstats_display[stat].name;
2682 rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
2683 ids_supp + stat_supp);
2685 fprintf(stderr, "No xstat '%s' on port %u - skipping (index %u)\n",
2686 xstat_name, pi, stat);
2692 port->xstats_info.ids_supp_sz = stat_supp;
2695 /** Fill helper structures for all ports to show extended statistics. */
2697 fill_xstats_display_info(void)
2701 if (xstats_display_num == 0)
2704 RTE_ETH_FOREACH_DEV(pi)
2705 fill_xstats_display_info_for_port(pi);
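/*
 * Editor's sketch (not part of testpmd): reading one extended statistic
 * by name with the same two-step lookup used above. The xstat name is
 * driver-dependent; "rx_good_packets" is common but not guaranteed.
 */
static int
read_one_xstat_sketch(uint16_t port_id, const char *name, uint64_t *value)
{
	uint64_t id;

	if (rte_eth_xstats_get_id_by_name(port_id, name, &id) != 0)
		return -1;	/* this driver does not expose the xstat */
	if (rte_eth_xstats_get_by_id(port_id, &id, value, 1) != 1)
		return -1;
	return 0;
}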
2709 start_port(portid_t pid)
2711 int diag, need_check_link_status = -1;
2713 portid_t p_pi = RTE_MAX_ETHPORTS;
2714 portid_t pl[RTE_MAX_ETHPORTS];
2715 portid_t peer_pl[RTE_MAX_ETHPORTS];
2716 uint16_t cnt_pi = 0;
2717 uint16_t cfg_pi = 0;
2720 struct rte_port *port;
2721 struct rte_eth_hairpin_cap cap;
2723 if (port_id_is_invalid(pid, ENABLED_WARN))
2726 RTE_ETH_FOREACH_DEV(pi) {
2727 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2730 need_check_link_status = 0;
2732 if (port->port_status == RTE_PORT_STOPPED)
2733 port->port_status = RTE_PORT_HANDLING;
2735 fprintf(stderr, "Port %d is not stopped\n", pi);
2739 if (port->need_reconfig > 0) {
2740 struct rte_eth_conf dev_conf;
2743 port->need_reconfig = 0;
2745 if (flow_isolate_all) {
2746 int ret = port_flow_isolate(pi, 1);
2749 "Failed to apply isolated mode on port %d\n",
2754 configure_rxtx_dump_callbacks(0);
2755 printf("Configuring Port %d (socket %u)\n", pi,
2757 if (nb_hairpinq > 0 &&
2758 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2760 "Port %d doesn't support hairpin queues\n",
2765 /* configure port */
2766 diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
2767 nb_txq + nb_hairpinq,
2770 if (port->port_status == RTE_PORT_HANDLING)
2771 port->port_status = RTE_PORT_STOPPED;
2774 "Port %d can not be set back to stopped\n",
2776 fprintf(stderr, "Fail to configure port %d\n",
2778 /* try to reconfigure port next time */
2779 port->need_reconfig = 1;
2782 /* Get the device configuration */
2784 eth_dev_conf_get_print_err(pi, &dev_conf)) {
2786 "Port %d: cannot get device configuration\n",
2790 /* Apply Rx offloads configuration */
2791 if (dev_conf.rxmode.offloads !=
2792 port->dev_conf.rxmode.offloads) {
2793 port->dev_conf.rxmode.offloads |=
2794 dev_conf.rxmode.offloads;
2796 k < port->dev_info.max_rx_queues;
2798 port->rx_conf[k].offloads |=
2799 dev_conf.rxmode.offloads;
2801 /* Apply Tx offloads configuration */
2802 if (dev_conf.txmode.offloads !=
2803 port->dev_conf.txmode.offloads) {
2804 port->dev_conf.txmode.offloads |=
2805 dev_conf.txmode.offloads;
2807 k < port->dev_info.max_tx_queues;
2809 port->tx_conf[k].offloads |=
2810 dev_conf.txmode.offloads;
2813 if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2814 port->need_reconfig_queues = 0;
2815 /* setup tx queues */
2816 for (qi = 0; qi < nb_txq; qi++) {
2817 if ((numa_support) &&
2818 (txring_numa[pi] != NUMA_NO_CONFIG))
2819 diag = rte_eth_tx_queue_setup(pi, qi,
2820 port->nb_tx_desc[qi],
2822 &(port->tx_conf[qi]));
2824 diag = rte_eth_tx_queue_setup(pi, qi,
2825 port->nb_tx_desc[qi],
2827 &(port->tx_conf[qi]));
2832 /* Failed to set up a Tx queue; revert port status and return */
2833 if (port->port_status == RTE_PORT_HANDLING)
2834 port->port_status = RTE_PORT_STOPPED;
2837 "Port %d cannot be set back to stopped\n",
2840 "Failed to configure port %d Tx queues\n",
2842 /* try to reconfigure queues next time */
2843 port->need_reconfig_queues = 1;
2846 for (qi = 0; qi < nb_rxq; qi++) {
2847 /* setup rx queues */
2848 if ((numa_support) &&
2849 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2850 struct rte_mempool * mp =
2852 (rxring_numa[pi], 0);
2855 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2860 diag = rx_queue_setup(pi, qi,
2861 port->nb_rx_desc[qi],
2863 &(port->rx_conf[qi]),
2866 struct rte_mempool *mp =
2868 (port->socket_id, 0);
2871 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2875 diag = rx_queue_setup(pi, qi,
2876 port->nb_rx_desc[qi],
2878 &(port->rx_conf[qi]),
2884 /* Failed to set up an Rx queue; revert port status and return */
2885 if (port->port_status == RTE_PORT_HANDLING)
2886 port->port_status = RTE_PORT_STOPPED;
2889 "Port %d cannot be set back to stopped\n",
2892 "Failed to configure port %d Rx queues\n",
2894 /* try to reconfigure queues next time */
2895 port->need_reconfig_queues = 1;
2898 /* setup hairpin queues */
2899 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
2902 configure_rxtx_dump_callbacks(verbose_level);
2904 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2908 "Port %d: Failed to disable Ptype parsing\n",
2916 diag = eth_dev_start_mp(pi);
2918 fprintf(stderr, "Failed to start port %d: %s\n",
2919 pi, rte_strerror(-diag));
2921 /* Failed to start the port; revert port status and return */
2922 if (port->port_status == RTE_PORT_HANDLING)
2923 port->port_status = RTE_PORT_STOPPED;
2926 "Port %d cannot be set back to stopped\n",
2931 if (port->port_status == RTE_PORT_HANDLING)
2932 port->port_status = RTE_PORT_STARTED;
2934 fprintf(stderr, "Port %d cannot be set to started\n",
2937 if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
2938 printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
2939 RTE_ETHER_ADDR_BYTES(&port->eth_addr));
2941 /* At least one port was started; the link status needs to be checked */
2942 need_check_link_status = 1;
2947 if (need_check_link_status == 1 && !no_link_check)
2948 check_all_ports_link_status(RTE_PORT_ALL);
2949 else if (need_check_link_status == 0)
2950 fprintf(stderr, "Please stop the ports first\n");
2952 if (hairpin_mode & 0xf) {
2956 /* bind all started hairpin ports */
2957 for (i = 0; i < cfg_pi; i++) {
2959 /* bind current Tx to all peer Rx */
2960 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2961 RTE_MAX_ETHPORTS, 1);
2964 for (j = 0; j < peer_pi; j++) {
2965 if (!port_is_started(peer_pl[j]))
2967 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2970 "Error during binding hairpin Tx port %u to %u: %s\n",
2972 rte_strerror(-diag));
2976 /* bind all peer Tx to current Rx */
2977 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2978 RTE_MAX_ETHPORTS, 0);
2981 for (j = 0; j < peer_pi; j++) {
2982 if (!port_is_started(peer_pl[j]))
2984 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
2987 "Error during binding hairpin Tx port %u to %u: %s\n",
2989 rte_strerror(-diag));
2996 fill_xstats_display_info_for_port(pid);
3003 stop_port(portid_t pid)
3006 struct rte_port *port;
3007 int need_check_link_status = 0;
3008 portid_t peer_pl[RTE_MAX_ETHPORTS];
3011 if (port_id_is_invalid(pid, ENABLED_WARN))
3014 printf("Stopping ports...\n");
3016 RTE_ETH_FOREACH_DEV(pi) {
3017 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3020 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3022 "Please remove port %d from forwarding configuration.\n",
3027 if (port_is_bonding_slave(pi)) {
3029 "Please remove port %d from bonded device.\n",
3035 if (port->port_status == RTE_PORT_STARTED)
3036 port->port_status = RTE_PORT_HANDLING;
3040 if (hairpin_mode & 0xf) {
3043 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
3044 /* unbind all peer Tx from current Rx */
3045 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3046 RTE_MAX_ETHPORTS, 0);
3049 for (j = 0; j < peer_pi; j++) {
3050 if (!port_is_started(peer_pl[j]))
3052 rte_eth_hairpin_unbind(peer_pl[j], pi);
3056 if (port->flow_list)
3057 port_flow_flush(pi);
3059 if (eth_dev_stop_mp(pi) != 0)
3060 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3063 if (port->port_status == RTE_PORT_HANDLING)
3064 port->port_status = RTE_PORT_STOPPED;
3066 fprintf(stderr, "Port %d cannot be set to stopped\n",
3068 need_check_link_status = 1;
3070 if (need_check_link_status && !no_link_check)
3071 check_all_ports_link_status(RTE_PORT_ALL);
3077 remove_invalid_ports_in(portid_t *array, portid_t *total)
3080 portid_t new_total = 0;
3082 for (i = 0; i < *total; i++)
3083 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
3084 array[new_total] = array[i];
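/*
 * Editor's note: this loop compacts the array in place. For example,
 * {0, 5, 7} with port 5 detached becomes {0, 7} and *total drops
 * from 3 to 2.
 */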
3091 remove_invalid_ports(void)
3093 remove_invalid_ports_in(ports_ids, &nb_ports);
3094 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
3095 nb_cfg_ports = nb_fwd_ports;
3099 close_port(portid_t pid)
3102 struct rte_port *port;
3104 if (port_id_is_invalid(pid, ENABLED_WARN))
3107 printf("Closing ports...\n");
3109 RTE_ETH_FOREACH_DEV(pi) {
3110 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3113 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3115 "Please remove port %d from forwarding configuration.\n",
3120 if (port_is_bonding_slave(pi)) {
3122 "Please remove port %d from bonded device.\n",
3128 if (port->port_status == RTE_PORT_CLOSED) {
3129 fprintf(stderr, "Port %d is already closed\n", pi);
3133 if (is_proc_primary()) {
3134 port_flow_flush(pi);
3135 port_flex_item_flush(pi);
3136 rte_eth_dev_close(pi);
3139 free_xstats_display_info(pi);
3142 remove_invalid_ports();
3147 reset_port(portid_t pid)
3151 struct rte_port *port;
3153 if (port_id_is_invalid(pid, ENABLED_WARN))
3156 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
3157 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
3159 "Can not reset port(s), please stop port(s) first.\n");
3163 printf("Resetting ports...\n");
3165 RTE_ETH_FOREACH_DEV(pi) {
3166 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3169 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3171 "Please remove port %d from forwarding configuration.\n",
3176 if (port_is_bonding_slave(pi)) {
3178 "Please remove port %d from bonded device.\n",
3183 diag = rte_eth_dev_reset(pi);
3186 port->need_reconfig = 1;
3187 port->need_reconfig_queues = 1;
3189 fprintf(stderr, "Failed to reset port %d. diag=%d\n",
3198 attach_port(char *identifier)
3201 struct rte_dev_iterator iterator;
3203 printf("Attaching a new port...\n");
3205 if (identifier == NULL) {
3206 fprintf(stderr, "No device identifier specified\n");
3210 if (rte_dev_probe(identifier) < 0) {
3211 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3215 /* first attach mode: event */
3216 if (setup_on_probe_event) {
3217 /* new ports are detected on RTE_ETH_EVENT_NEW event */
3218 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
3219 if (ports[pi].port_status == RTE_PORT_HANDLING &&
3220 ports[pi].need_setup != 0)
3221 setup_attached_port(pi);
3225 /* second attach mode: iterator */
3226 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
3227 /* setup ports matching the devargs used for probing */
3228 if (port_is_forwarding(pi))
3229 continue; /* port was already attached before */
3230 setup_attached_port(pi);
3235 setup_attached_port(portid_t pi)
3237 unsigned int socket_id;
3240 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
3241 /* if socket_id is invalid, set to the first available socket. */
3242 if (check_socket_id(socket_id) < 0)
3243 socket_id = socket_ids[0];
3244 reconfig(pi, socket_id);
3245 ret = rte_eth_promiscuous_enable(pi);
3248 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
3249 pi, rte_strerror(-ret));
3251 ports_ids[nb_ports++] = pi;
3252 fwd_ports_ids[nb_fwd_ports++] = pi;
3253 nb_cfg_ports = nb_fwd_ports;
3254 ports[pi].need_setup = 0;
3255 ports[pi].port_status = RTE_PORT_STOPPED;
3257 printf("Port %d is attached. Now the total number of ports is %d\n", pi, nb_ports);
3262 detach_device(struct rte_device *dev)
3267 fprintf(stderr, "Device already removed\n");
3271 printf("Removing a device...\n");
3273 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3274 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3275 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3276 fprintf(stderr, "Port %u not stopped\n",
3280 port_flow_flush(sibling);
3284 if (rte_dev_remove(dev) < 0) {
3285 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3288 remove_invalid_ports();
3290 printf("Device is detached\n");
3291 printf("Now the total number of ports is %d\n", nb_ports);
3297 detach_port_device(portid_t port_id)
3300 struct rte_eth_dev_info dev_info;
3302 if (port_id_is_invalid(port_id, ENABLED_WARN))
3305 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3306 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3307 fprintf(stderr, "Port not stopped\n");
3310 fprintf(stderr, "Port was not closed\n");
3313 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3316 "Failed to get device info for port %d, not detaching\n",
3320 detach_device(dev_info.device);
3324 detach_devargs(char *identifier)
3326 struct rte_dev_iterator iterator;
3327 struct rte_devargs da;
3330 printf("Removing a device...\n");
3332 memset(&da, 0, sizeof(da));
3333 if (rte_devargs_parsef(&da, "%s", identifier)) {
3334 fprintf(stderr, "cannot parse identifier\n");
3338 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
3339 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3340 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3341 fprintf(stderr, "Port %u not stopped\n",
3343 rte_eth_iterator_cleanup(&iterator);
3344 rte_devargs_reset(&da);
3347 port_flow_flush(port_id);
3351 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3352 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3353 da.name, da.bus->name);
3354 rte_devargs_reset(&da);
3358 remove_invalid_ports();
3360 printf("Device %s is detached\n", identifier);
3361 printf("Now the total number of ports is %d\n", nb_ports);
3363 rte_devargs_reset(&da);
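/*
 * Editor's sketch (not part of testpmd): parsing a devargs identifier
 * the same way detach_devargs() does, only to print the bus and device
 * name it resolves to. The helper name is hypothetical.
 */
static int
inspect_devargs_sketch(const char *identifier)
{
	struct rte_devargs da;

	memset(&da, 0, sizeof(da));
	if (rte_devargs_parsef(&da, "%s", identifier) != 0)
		return -1;	/* identifier did not parse */
	printf("bus: %s, device: %s\n", da.bus->name, da.name);
	rte_devargs_reset(&da);
	return 0;
}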
3374 stop_packet_forwarding();
3376 #ifndef RTE_EXEC_ENV_WINDOWS
3377 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3379 if (mp_alloc_type == MP_ALLOC_ANON)
3380 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3385 if (ports != NULL) {
3387 RTE_ETH_FOREACH_DEV(pt_id) {
3388 printf("\nStopping port %d...\n", pt_id);
3392 RTE_ETH_FOREACH_DEV(pt_id) {
3393 printf("\nShutting down port %d...\n", pt_id);
3400 ret = rte_dev_event_monitor_stop();
3403 "fail to stop device event monitor.");
3407 ret = rte_dev_event_callback_unregister(NULL,
3408 dev_event_callback, NULL);
3411 "fail to unregister device event callback.\n");
3415 ret = rte_dev_hotplug_handle_disable();
3418 "fail to disable hotplug handling.\n");
3422 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3424 mempool_free_mp(mempools[i]);
3426 free(xstats_display);
3428 printf("\nBye...\n");
3431 typedef void (*cmd_func_t)(void);
3432 struct pmd_test_command {
3433 const char *cmd_name;
3434 cmd_func_t cmd_func;
3437 /* Check the link status of all ports for up to 9 s, then print the final status */
3439 check_all_ports_link_status(uint32_t port_mask)
3441 #define CHECK_INTERVAL 100 /* 100ms */
3442 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3444 uint8_t count, all_ports_up, print_flag = 0;
3445 struct rte_eth_link link;
3447 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3449 printf("Checking link statuses...\n");
3451 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3453 RTE_ETH_FOREACH_DEV(portid) {
3454 if ((port_mask & (1 << portid)) == 0)
3456 memset(&link, 0, sizeof(link));
3457 ret = rte_eth_link_get_nowait(portid, &link);
3460 if (print_flag == 1)
3462 "Port %u link get failed: %s\n",
3463 portid, rte_strerror(-ret));
3466 /* print link status if flag set */
3467 if (print_flag == 1) {
3468 rte_eth_link_to_str(link_status,
3469 sizeof(link_status), &link);
3470 printf("Port %d %s\n", portid, link_status);
3473 /* clear all_ports_up flag if any link down */
3474 if (link.link_status == RTE_ETH_LINK_DOWN) {
3479 /* After printing the final link status, stop polling */
3480 if (print_flag == 1)
3483 if (all_ports_up == 0) {
3485 rte_delay_ms(CHECK_INTERVAL);
3488 /* set the print_flag if all ports up or timeout */
3489 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3499 rmv_port_callback(void *arg)
3501 int need_to_start = 0;
3502 int org_no_link_check = no_link_check;
3503 portid_t port_id = (intptr_t)arg;
3504 struct rte_eth_dev_info dev_info;
3507 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3509 if (!test_done && port_is_forwarding(port_id)) {
3511 stop_packet_forwarding();
3515 no_link_check = org_no_link_check;
3517 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3520 "Failed to get device info for port %d, not detaching\n",
3523 struct rte_device *device = dev_info.device;
3524 close_port(port_id);
3525 detach_device(device); /* might be already removed or have more ports */
3528 start_packet_forwarding(0);
3531 /* This function is used by the interrupt thread */
3533 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3536 RTE_SET_USED(param);
3537 RTE_SET_USED(ret_param);
3539 if (type >= RTE_ETH_EVENT_MAX) {
3541 "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3542 port_id, __func__, type);
3544 } else if (event_print_mask & (UINT32_C(1) << type)) {
3545 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3546 eth_event_desc[type]);
3551 case RTE_ETH_EVENT_NEW:
3552 ports[port_id].need_setup = 1;
3553 ports[port_id].port_status = RTE_PORT_HANDLING;
3555 case RTE_ETH_EVENT_INTR_RMV:
3556 if (port_id_is_invalid(port_id, DISABLED_WARN))
3558 if (rte_eal_alarm_set(100000,
3559 rmv_port_callback, (void *)(intptr_t)port_id))
3561 "Could not set up deferred device removal\n");
3563 case RTE_ETH_EVENT_DESTROY:
3564 ports[port_id].port_status = RTE_PORT_CLOSED;
3565 printf("Port %u is closed\n", port_id);
3574 register_eth_event_callback(void)
3577 enum rte_eth_event_type event;
3579 for (event = RTE_ETH_EVENT_UNKNOWN;
3580 event < RTE_ETH_EVENT_MAX; event++) {
3581 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3586 TESTPMD_LOG(ERR, "Failed to register callback for "
3587 "%s event\n", eth_event_desc[event]);
3595 /* This function is used by the interrupt thread */
3597 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3598 __rte_unused void *arg)
3603 if (type >= RTE_DEV_EVENT_MAX) {
3604 fprintf(stderr, "%s called upon invalid event %d\n",
3610 case RTE_DEV_EVENT_REMOVE:
3611 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3613 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3615 RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
3620 * Because the user's callback is invoked from the EAL interrupt
3621 * callback, the interrupt callback must return before it can be
3622 * unregistered while the device is being detached. So finish the
3623 * callback quickly and use a deferred removal to detach the
3624 * device instead. This is a workaround: once device detaching is
3625 * moved into the EAL, the deferred removal can go away.
3628 if (rte_eal_alarm_set(100000,
3629 rmv_port_callback, (void *)(intptr_t)port_id))
3631 "Could not set up deferred device removal\n");
3633 case RTE_DEV_EVENT_ADD:
3634 RTE_LOG(DEBUG, EAL, "The device: %s has been added!\n",
3636 /* TODO: once kernel driver binding is finished,
3637 * attach the port.
3646 rxtx_port_config(portid_t pid)
3650 struct rte_port *port = &ports[pid];
3652 for (qid = 0; qid < nb_rxq; qid++) {
3653 offloads = port->rx_conf[qid].offloads;
3654 port->rx_conf[qid] = port->dev_info.default_rxconf;
3656 if (rxq_share > 0 &&
3657 (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
3658 /* Non-zero share group to enable RxQ share. */
3659 port->rx_conf[qid].share_group = pid / rxq_share + 1;
3660 port->rx_conf[qid].share_qid = qid; /* Equal mapping. */
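/*
 * Editor's example (illustrative): with --rxq-share=2, ports 0-1 fall
 * into share group 1 and ports 2-3 into group 2; queue N of each port
 * in a group shares queue N.
 */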
3664 port->rx_conf[qid].offloads = offloads;
3666 /* Check if any Rx parameters have been passed */
3667 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3668 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3670 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3671 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3673 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3674 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3676 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3677 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3679 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3680 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3682 port->nb_rx_desc[qid] = nb_rxd;
3685 for (qid = 0; qid < nb_txq; qid++) {
3686 offloads = port->tx_conf[qid].offloads;
3687 port->tx_conf[qid] = port->dev_info.default_txconf;
3689 port->tx_conf[qid].offloads = offloads;
3691 /* Check if any Tx parameters have been passed */
3692 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3693 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3695 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3696 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3698 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3699 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3701 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3702 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3704 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3705 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3707 port->nb_tx_desc[qid] = nb_txd;
3712 * Helper function to set the MTU from the maximum frame size.
3714 * port->dev_info must be set before calling this function.
3716 * Return 0 on success, negative on error.
3719 update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
3721 struct rte_port *port = &ports[portid];
3722 uint32_t eth_overhead;
3723 uint16_t mtu, new_mtu;
3725 eth_overhead = get_eth_overhead(&port->dev_info);
3727 if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
3728 printf("Failed to get MTU for port %u\n", portid);
3732 new_mtu = max_rx_pktlen - eth_overhead;
3737 if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
3739 "Failed to set MTU to %u for port %u\n",
3744 port->dev_conf.rxmode.mtu = new_mtu;
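/*
 * Editor's worked example for update_mtu_from_frame_size(): for plain
 * Ethernet the overhead is the 14-byte header plus the 4-byte FCS
 * (18 bytes; more if the driver counts VLAN tags), so a maximum Rx
 * frame length of 1518 bytes yields the classic MTU of 1500.
 */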
3750 init_port_config(void)
3753 struct rte_port *port;
3756 RTE_ETH_FOREACH_DEV(pid) {
3758 port->dev_conf.fdir_conf = fdir_conf;
3760 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3765 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3766 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3767 rss_hf & port->dev_info.flow_type_rss_offloads;
3769 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3770 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3773 if (port->dcb_flag == 0) {
3774 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
3775 port->dev_conf.rxmode.mq_mode =
3776 (enum rte_eth_rx_mq_mode)
3777 (rx_mq_mode & RTE_ETH_MQ_RX_RSS);
3779 port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
3780 port->dev_conf.rxmode.offloads &=
3781 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3784 i < port->dev_info.nb_rx_queues;
3786 port->rx_conf[i].offloads &=
3787 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3791 rxtx_port_config(pid);
3793 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3797 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3798 rte_pmd_ixgbe_bypass_init(pid);
3801 if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
3802 port->dev_conf.intr_conf.lsc = 1;
3803 if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3804 port->dev_conf.intr_conf.rmv = 1;
3808 void set_port_slave_flag(portid_t slave_pid)
3810 struct rte_port *port;
3812 port = &ports[slave_pid];
3813 port->slave_flag = 1;
3816 void clear_port_slave_flag(portid_t slave_pid)
3818 struct rte_port *port;
3820 port = &ports[slave_pid];
3821 port->slave_flag = 0;
3824 uint8_t port_is_bonding_slave(portid_t slave_pid)
3826 struct rte_port *port;
3827 struct rte_eth_dev_info dev_info;
3830 port = &ports[slave_pid];
3831 ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
3834 "Failed to get device info for port id %d,"
3835 "cannot determine if the port is a bonded slave",
3839 if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3844 const uint16_t vlan_tags[] = {
3845 0, 1, 2, 3, 4, 5, 6, 7,
3846 8, 9, 10, 11, 12, 13, 14, 15,
3847 16, 17, 18, 19, 20, 21, 22, 23,
3848 24, 25, 26, 27, 28, 29, 30, 31
3852 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3853 enum dcb_mode_enable dcb_mode,
3854 enum rte_eth_nb_tcs num_tcs,
3859 struct rte_eth_rss_conf rss_conf;
3862 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3863 * given above, and the number of traffic classes available for use.
3865 if (dcb_mode == DCB_VT_ENABLED) {
3866 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3867 ð_conf->rx_adv_conf.vmdq_dcb_conf;
3868 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3869 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3871 /* VMDQ+DCB RX and TX configurations */
3872 vmdq_rx_conf->enable_default_pool = 0;
3873 vmdq_rx_conf->default_pool = 0;
3874 vmdq_rx_conf->nb_queue_pools =
3875 (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
3876 vmdq_tx_conf->nb_queue_pools =
3877 (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
3879 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3880 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3881 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3882 vmdq_rx_conf->pool_map[i].pools =
3883 1 << (i % vmdq_rx_conf->nb_queue_pools);
3885 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3886 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3887 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3890 /* set DCB mode of RX and TX of multiple queues */
3891 eth_conf->rxmode.mq_mode =
3892 (enum rte_eth_rx_mq_mode)
3893 (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
3894 eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
3896 struct rte_eth_dcb_rx_conf *rx_conf =
3897 ð_conf->rx_adv_conf.dcb_rx_conf;
3898 struct rte_eth_dcb_tx_conf *tx_conf =
3899 ð_conf->tx_adv_conf.dcb_tx_conf;
3901 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3903 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3907 rx_conf->nb_tcs = num_tcs;
3908 tx_conf->nb_tcs = num_tcs;
3910 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3911 rx_conf->dcb_tc[i] = i % num_tcs;
3912 tx_conf->dcb_tc[i] = i % num_tcs;
3915 eth_conf->rxmode.mq_mode =
3916 (enum rte_eth_rx_mq_mode)
3917 (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
3918 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3919 eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
3923 eth_conf->dcb_capability_en =
3924 RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
3926 eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
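/*
 * Editor's example (follows the "i % num_tcs" loops above): with
 * num_tcs == RTE_ETH_4_TCS the eight user priorities map to traffic
 * classes 0,1,2,3,0,1,2,3; with RTE_ETH_8_TCS the mapping is 1:1.
 */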
3932 init_port_dcb_config(portid_t pid,
3933 enum dcb_mode_enable dcb_mode,
3934 enum rte_eth_nb_tcs num_tcs,
3937 struct rte_eth_conf port_conf;
3938 struct rte_port *rte_port;
3942 if (num_procs > 1) {
3943 printf("The multi-process feature doesn't support dcb.\n");
3946 rte_port = &ports[pid];
3948 /* retain the original device configuration. */
3949 memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
3951 /* Set the configuration of DCB in VT mode and DCB in non-VT mode */
3952 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3955 port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3956 /* remove RSS HASH offload for DCB in vt mode */
3957 if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
3958 port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3959 for (i = 0; i < nb_rxq; i++)
3960 rte_port->rx_conf[i].offloads &=
3961 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3964 /* Re-configure the device. */
3965 retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
3969 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3973 /* If dev_info.vmdq_pool_base is greater than 0,
3974 * the queue IDs of the VMDq pools start after the PF queues.
3976 if (dcb_mode == DCB_VT_ENABLED &&
3977 rte_port->dev_info.vmdq_pool_base > 0) {
3979 "VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
3984 /* Assume all ports in testpmd have the same DCB capability
3985 * and the same number of Rx and Tx queues in DCB mode.
3987 if (dcb_mode == DCB_VT_ENABLED) {
3988 if (rte_port->dev_info.max_vfs > 0) {
3989 nb_rxq = rte_port->dev_info.nb_rx_queues;
3990 nb_txq = rte_port->dev_info.nb_tx_queues;
3992 nb_rxq = rte_port->dev_info.max_rx_queues;
3993 nb_txq = rte_port->dev_info.max_tx_queues;
3996 /* If VT is disabled, use all PF queues */
3997 if (rte_port->dev_info.vmdq_pool_base == 0) {
3998 nb_rxq = rte_port->dev_info.max_rx_queues;
3999 nb_txq = rte_port->dev_info.max_tx_queues;
4001 nb_rxq = (queueid_t)num_tcs;
4002 nb_txq = (queueid_t)num_tcs;
4006 rx_free_thresh = 64;
4008 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4010 rxtx_port_config(pid);
4012 rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4013 for (i = 0; i < RTE_DIM(vlan_tags); i++)
4014 rx_vft_set(pid, vlan_tags[i], 1);
4016 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4020 rte_port->dcb_flag = 1;
4022 /* Enter DCB configuration status */
4033 /* Configuration of Ethernet ports. */
4034 ports = rte_zmalloc("testpmd: ports",
4035 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4036 RTE_CACHE_LINE_SIZE);
4037 if (ports == NULL) {
4038 rte_exit(EXIT_FAILURE,
4039 "rte_zmalloc(%d struct rte_port) failed\n",
4042 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4043 ports[i].xstats_info.allocated = false;
4044 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4045 LIST_INIT(&ports[i].flow_tunnel_list);
4046 /* Initialize ports NUMA structures */
4047 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4048 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4049 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4063 const char clr[] = { 27, '[', '2', 'J', '\0' }; /* ESC "[2J": clear screen */
4064 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; /* ESC "[1;1H": cursor to top left */
4066 /* Clear screen and move to top left */
4067 printf("%s%s", clr, top_left);
4069 printf("\nPort statistics ====================================");
4070 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4071 nic_stats_display(fwd_ports_ids[i]);
4077 signal_handler(int signum)
4079 if (signum == SIGINT || signum == SIGTERM) {
4080 fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4082 #ifdef RTE_LIB_PDUMP
4083 /* uninitialize packet capture framework */
4086 #ifdef RTE_LIB_LATENCYSTATS
4087 if (latencystats_enabled != 0)
4088 rte_latencystats_uninit();
4091 /* Set flag to indicate forced termination. */
4093 /* exit with the expected status */
4094 #ifndef RTE_EXEC_ENV_WINDOWS
4095 signal(signum, SIG_DFL);
4096 kill(getpid(), signum);
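/*
 * Editor's note: restoring SIG_DFL and re-raising the signal lets the
 * process terminate with the conventional "killed by signal" status
 * instead of a normal exit code.
 */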
4102 main(int argc, char** argv)
4109 signal(SIGINT, signal_handler);
4110 signal(SIGTERM, signal_handler);
4112 testpmd_logtype = rte_log_register("testpmd");
4113 if (testpmd_logtype < 0)
4114 rte_exit(EXIT_FAILURE, "Cannot register log type");
4115 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4117 diag = rte_eal_init(argc, argv);
4119 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
4120 rte_strerror(rte_errno));
4122 ret = register_eth_event_callback();
4124 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
4126 #ifdef RTE_LIB_PDUMP
4127 /* initialize packet capture framework */
4132 RTE_ETH_FOREACH_DEV(port_id) {
4133 ports_ids[count] = port_id;
4136 nb_ports = (portid_t) count;
4138 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
4140 /* allocate port structures, and init them */
4143 set_def_fwd_config();
4145 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
4146 "Check the core mask argument\n");
4148 /* Bitrate/latency stats disabled by default */
4149 #ifdef RTE_LIB_BITRATESTATS
4150 bitrate_enabled = 0;
4152 #ifdef RTE_LIB_LATENCYSTATS
4153 latencystats_enabled = 0;
4156 /* on FreeBSD, mlockall() is disabled by default */
4157 #ifdef RTE_EXEC_ENV_FREEBSD
4166 launch_args_parse(argc, argv);
4168 #ifndef RTE_EXEC_ENV_WINDOWS
4169 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4170 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
4175 if (tx_first && interactive)
4176 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
4177 "interactive mode.\n");
4179 if (tx_first && lsc_interrupt) {
4181 "Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
4185 if (!nb_rxq && !nb_txq)
4187 "Warning: Either rx or tx queues should be non-zero\n");
4189 if (nb_rxq > 1 && nb_rxq > nb_txq)
4191 "Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
4197 ret = rte_dev_hotplug_handle_enable();
4200 "fail to enable hotplug handling.");
4204 ret = rte_dev_event_monitor_start();
4207 "fail to start device event monitoring.");
4211 ret = rte_dev_event_callback_register(NULL,
4212 dev_event_callback, NULL);
4215 "fail to register device event callback\n");
4220 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
4221 rte_exit(EXIT_FAILURE, "Start ports failed\n");
4223 /* set all ports to promiscuous mode by default */
4224 RTE_ETH_FOREACH_DEV(port_id) {
4225 ret = rte_eth_promiscuous_enable(port_id);
4228 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
4229 port_id, rte_strerror(-ret));
4232 #ifdef RTE_LIB_METRICS
4233 /* Init metrics library */
4234 rte_metrics_init(rte_socket_id());
4237 #ifdef RTE_LIB_LATENCYSTATS
4238 if (latencystats_enabled != 0) {
4239 int ret = rte_latencystats_init(1, NULL);
4242 "Warning: latencystats init() returned error %d\n",
4244 fprintf(stderr, "Latencystats running on lcore %d\n",
4245 latencystats_lcore_id);
4249 /* Setup bitrate stats */
4250 #ifdef RTE_LIB_BITRATESTATS
4251 if (bitrate_enabled != 0) {
4252 bitrate_data = rte_stats_bitrate_create();
4253 if (bitrate_data == NULL)
4254 rte_exit(EXIT_FAILURE,
4255 "Could not allocate bitrate data.\n");
4256 rte_stats_bitrate_reg(bitrate_data);
4259 #ifdef RTE_LIB_CMDLINE
4260 if (strlen(cmdline_filename) != 0)
4261 cmdline_read_from_file(cmdline_filename);
4263 if (interactive == 1) {
4265 printf("Start automatic packet forwarding\n");
4266 start_packet_forwarding(0);
4278 printf("No commandline core given, start packet forwarding\n");
4279 start_packet_forwarding(tx_first);
4280 if (stats_period != 0) {
4281 uint64_t prev_time = 0, cur_time, diff_time = 0;
4282 uint64_t timer_period;
4284 /* Convert to number of cycles */
4285 timer_period = stats_period * rte_get_timer_hz();
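/*
 * Editor's example (illustrative): with --stats-period=5 on a
 * 2.5 GHz timer, timer_period is 5 * 2.5e9 = 12.5e9 cycles.
 */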
4287 while (f_quit == 0) {
4288 cur_time = rte_get_timer_cycles();
4289 diff_time += cur_time - prev_time;
4291 if (diff_time >= timer_period) {
4293 /* Reset the timer */
4296 /* Sleep to avoid unnecessary checks */
4297 prev_time = cur_time;
4298 rte_delay_us_sleep(US_PER_S);
4302 printf("Press enter to exit\n");
4303 rc = read(0, &c, 1);
4309 ret = rte_eal_cleanup();
4311 rte_exit(EXIT_FAILURE,
4312 "EAL cleanup failed: %s\n", strerror(-ret));
4314 return EXIT_SUCCESS;