1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
12 #ifndef RTE_EXEC_ENV_WINDOWS
15 #include <sys/types.h>
19 #include <sys/queue.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_byteorder.h>
30 #include <rte_debug.h>
31 #include <rte_cycles.h>
32 #include <rte_memory.h>
33 #include <rte_memcpy.h>
34 #include <rte_launch.h>
36 #include <rte_alarm.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
51 #include <rte_pmd_ixgbe.h>
54 #include <rte_pdump.h>
57 #ifdef RTE_LIB_METRICS
58 #include <rte_metrics.h>
60 #ifdef RTE_LIB_BITRATESTATS
61 #include <rte_bitrate.h>
63 #ifdef RTE_LIB_LATENCYSTATS
64 #include <rte_latencystats.h>
66 #ifdef RTE_EXEC_ENV_WINDOWS
73 /* FreeBSD does not have MAP_HUGETLB, so fall back to the Linux value */
74 #define HUGE_FLAG (0x40000)
76 #define HUGE_FLAG MAP_HUGETLB
79 #ifndef MAP_HUGE_SHIFT
80 /* older kernels (or FreeBSD) will not have this define */
81 #define HUGE_SHIFT (26)
83 #define HUGE_SHIFT MAP_HUGE_SHIFT
86 #define EXTMEM_HEAP_NAME "extmem"
88 * Zone size with the malloc overhead (max of debug and release variants)
89 * must fit into the smallest supported hugepage size (2M),
90 * so that an IOVA-contiguous zone of this size can always be allocated
91 * if there are free 2M hugepages.
93 #define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
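/*
 * Worked example (assuming the common 64-byte cache line size):
 * RTE_PGSIZE_2M is 2097152 bytes and 4 * RTE_CACHE_LINE_SIZE is 256,
 * so EXTBUF_ZONE_SIZE = 2097152 - 256 = 2096896 bytes, leaving room
 * for the allocator overhead inside a single 2M hugepage.
 */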
95 uint16_t verbose_level = 0; /**< Silent by default. */
96 int testpmd_logtype; /**< Log type for testpmd logs */
98 /* Use the main core for the command line? */
99 uint8_t interactive = 0;
100 uint8_t auto_start = 0;
102 char cmdline_filename[PATH_MAX] = {0};
105 * NUMA support configuration.
106  * When set, NUMA support dispatches the allocation of the
107  * RX and TX memory rings, and of the DMA memory buffers (mbufs) of the
108  * probed ports, among CPU sockets 0 and 1.
109 * Otherwise, all memory is allocated from CPU socket 0.
111 uint8_t numa_support = 1; /**< numa enabled by default */
114  * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
117 uint8_t socket_num = UMA_NO_CONFIG;
120 * Select mempool allocation type:
121 * - native: use regular DPDK memory
122 * - anon: use regular DPDK memory to create mempool, but populate using
123 * anonymous memory (may not be IOVA-contiguous)
124 * - xmem: use externally allocated hugepage memory
126 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
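/*
 * For illustration, the allocation type is selected on the testpmd
 * command line, e.g. (hypothetical invocation):
 *   dpdk-testpmd -l 0-3 -n 4 -- --mp-alloc=xmem -i
 * which would populate the mempool from externally allocated
 * hugepage memory.
 */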
129  * Store the sockets on which the memory pools used by each port are allocated.
132 uint8_t port_numa[RTE_MAX_ETHPORTS];
135  * Store the sockets on which the RX rings used by each port are allocated.
138 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
141  * Store the sockets on which the TX rings used by each port are allocated.
144 uint8_t txring_numa[RTE_MAX_ETHPORTS];
147  * Record the Ethernet address of peer target ports to which packets are sent.
149  * Must be instantiated with the Ethernet addresses of peer traffic generator ports.
152 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
153 portid_t nb_peer_eth_addrs = 0;
156 * Probed Target Environment.
158 struct rte_port *ports; /**< For all probed ethernet ports. */
159 portid_t nb_ports; /**< Number of probed ethernet ports. */
160 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
161 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
163 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
166 * Test Forwarding Configuration.
167 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
168 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
170 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
171 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
172 portid_t nb_cfg_ports; /**< Number of configured ports. */
173 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
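/*
 * Example of the invariants above: testpmd launched with 8 enabled
 * lcores and 4 probed ports gives nb_lcores = 8 and nb_ports = 4;
 * shrinking the forwarding set interactively (for instance with the
 * "set nbcore 2" and "set nbport 2" commands) then yields
 * nb_fwd_lcores = 2 <= nb_cfg_lcores <= 8 and
 * nb_fwd_ports = 2 <= nb_cfg_ports <= 4.
 */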
175 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
176 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
178 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
179 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
182 * Forwarding engines.
184 struct fwd_engine * fwd_engines[] = {
194 &five_tuple_swap_fwd_engine,
195 #ifdef RTE_LIBRTE_IEEE1588
196 &ieee1588_fwd_engine,
202 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
203 uint16_t mempool_flags;
205 struct fwd_config cur_fwd_config;
206 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
207 uint32_t retry_enabled;
208 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
209 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
211 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
212 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
213 DEFAULT_MBUF_DATA_SIZE
214 }; /**< Mbuf data space size. */
215 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
216 * specified on command-line. */
217 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
219 /** Extended statistics to show. */
220 struct rte_eth_xstat_name *xstats_display;
222 unsigned int xstats_display_num; /**< Number of extended statistics to show */
225  * In a container, the process running with the 'stats-period' option
226  * cannot be terminated directly. Set a flag to exit the stats-period loop when SIGINT/SIGTERM is received.
231 * Max Rx frame size, set by '--max-pkt-len' parameter.
233 uint32_t max_rx_pkt_len;
236 * Configuration of packet segments used to scatter received packets
237  * if any of the split features is configured.
239 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
240 uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
241 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
242 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
245 * Configuration of packet segments used by the "txonly" processing engine.
247 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
248 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
249 TXONLY_DEF_PACKET_LEN,
251 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
253 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
254 /**< Split policy for packets to TX. */
256 uint8_t txonly_multi_flow;
257 /**< Whether multiple flows are generated in TXONLY mode. */
259 uint32_t tx_pkt_times_inter;
260 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
262 uint32_t tx_pkt_times_intra;
263 /**< Timings for send scheduling in TXONLY mode, time between packets. */
265 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
266 uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
267 int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
268 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
270 /* Whether the current configuration is in DCB mode; 0 means it is not */
271 uint8_t dcb_config = 0;
274 * Configurable number of RX/TX queues.
276 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
277 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
278 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
281 * Configurable number of RX/TX ring descriptors.
282 * Defaults are supplied by drivers via ethdev.
284 #define RTE_TEST_RX_DESC_DEFAULT 0
285 #define RTE_TEST_TX_DESC_DEFAULT 0
286 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
287 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
289 #define RTE_PMD_PARAM_UNSET -1
291 * Configurable values of RX and TX ring threshold registers.
294 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
295 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
296 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
298 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
299 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
300 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
303 * Configurable value of RX free threshold.
305 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
308 * Configurable value of RX drop enable.
310 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
313 * Configurable value of TX free threshold.
315 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
318 * Configurable value of TX RS bit threshold.
320 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
323 * Configurable value of buffered packets before sending.
325 uint16_t noisy_tx_sw_bufsz;
328 * Configurable value of packet buffer timeout.
330 uint16_t noisy_tx_sw_buf_flush_time;
333 * Configurable value for size of VNF internal memory area
334 * used for simulating noisy neighbour behaviour
336 uint64_t noisy_lkup_mem_sz;
339 * Configurable value of number of random writes done in
340 * VNF simulation memory area.
342 uint64_t noisy_lkup_num_writes;
345 * Configurable value of number of random reads done in
346 * VNF simulation memory area.
348 uint64_t noisy_lkup_num_reads;
351 * Configurable value of number of random reads/writes done in
352 * VNF simulation memory area.
354 uint64_t noisy_lkup_num_reads_writes;
357 * Receive Side Scaling (RSS) configuration.
359 uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
362 * Port topology configuration
364 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
367  * Avoid flushing all the RX streams before starting forwarding.
369 uint8_t no_flush_rx = 0; /* flush by default */
372 * Flow API isolated mode.
374 uint8_t flow_isolate_all;
377  * Avoid checking the link status when starting/stopping a port.
379 uint8_t no_link_check = 0; /* check by default */
382 * Don't automatically start all ports in interactive mode.
384 uint8_t no_device_start = 0;
387 * Enable link status change notification
389 uint8_t lsc_interrupt = 1; /* enabled by default */
392 * Enable device removal notification.
394 uint8_t rmv_interrupt = 1; /* enabled by default */
396 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
398 /* After attach, port setup is called on event or by iterator */
399 bool setup_on_probe_event = true;
401 /* Clear ptypes on port initialization. */
402 uint8_t clear_ptypes = true;
404 /* Hairpin ports configuration mode. */
405 uint16_t hairpin_mode;
407 /* Pretty printing of ethdev events */
408 static const char * const eth_event_desc[] = {
409 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
410 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
411 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
412 [RTE_ETH_EVENT_INTR_RESET] = "reset",
413 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
414 [RTE_ETH_EVENT_IPSEC] = "IPsec",
415 [RTE_ETH_EVENT_MACSEC] = "MACsec",
416 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
417 [RTE_ETH_EVENT_NEW] = "device probed",
418 [RTE_ETH_EVENT_DESTROY] = "device released",
419 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
420 [RTE_ETH_EVENT_MAX] = NULL,
424 * Display or mask ether events
425 * Default to all events except VF_MBOX
427 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
428 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
429 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
430 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
431 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
432 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
433 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
434 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
436  * Decide if all memory is locked for performance.
441 * NIC bypass mode configuration options.
444 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
445 /* The NIC bypass watchdog timeout. */
446 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
450 #ifdef RTE_LIB_LATENCYSTATS
453  * Set when latency stats are enabled on the command line.
455 uint8_t latencystats_enabled;
458 * Lcore ID to service latency statistics.
460 lcoreid_t latencystats_lcore_id = -1;
465 * Ethernet device configuration.
467 struct rte_eth_rxmode rx_mode;
469 struct rte_eth_txmode tx_mode = {
470 .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
473 struct rte_eth_fdir_conf fdir_conf = {
474 .mode = RTE_FDIR_MODE_NONE,
475 .pballoc = RTE_ETH_FDIR_PBALLOC_64K,
476 .status = RTE_FDIR_REPORT_STATUS,
478 .vlan_tci_mask = 0xFFEF,
480 .src_ip = 0xFFFFFFFF,
481 .dst_ip = 0xFFFFFFFF,
484 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
485 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
487 .src_port_mask = 0xFFFF,
488 .dst_port_mask = 0xFFFF,
489 .mac_addr_byte_mask = 0xFF,
490 .tunnel_type_mask = 1,
491 .tunnel_id_mask = 0xFFFFFFFF,
496 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
499 * Display zero values by default for xstats
501 uint8_t xstats_hide_zero;
504  * Measurement of CPU cycles is disabled by default.
506 uint8_t record_core_cycles;
509  * Display of RX and TX bursts is disabled by default.
511 uint8_t record_burst_stats;
514  * Number of ports per shared Rx queue group; 0 to disable.
518 unsigned int num_sockets = 0;
519 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
521 #ifdef RTE_LIB_BITRATESTATS
522 /* Bitrate statistics */
523 struct rte_stats_bitrates *bitrate_data;
524 lcoreid_t bitrate_lcore_id;
525 uint8_t bitrate_enabled;
529 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
530 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
534  * Hexadecimal bitmask of the RX multi-queue modes that can be enabled.
536 enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
539 * Used to set forced link speed
541 uint32_t eth_link_speed;
544 * ID of the current process in multi-process, used to
545 * configure the queues to be polled.
550 * Number of processes in multi-process, used to
551 * configure the queues to be polled.
553 unsigned int num_procs = 1;
556 eth_rx_metadata_negotiate_mp(uint16_t port_id)
558 uint64_t rx_meta_features = 0;
561 if (!is_proc_primary())
564 rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
565 rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
566 rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
568 ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
570 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
571 TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
575 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
576 TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
580 if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
581 TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
584 } else if (ret != -ENOTSUP) {
585 rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
586 port_id, rte_strerror(-ret));
591 eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
592 const struct rte_eth_conf *dev_conf)
594 if (is_proc_primary())
595 return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
601 eth_dev_start_mp(uint16_t port_id)
603 if (is_proc_primary())
604 return rte_eth_dev_start(port_id);
610 eth_dev_stop_mp(uint16_t port_id)
612 if (is_proc_primary())
613 return rte_eth_dev_stop(port_id);
619 mempool_free_mp(struct rte_mempool *mp)
621 if (is_proc_primary())
622 rte_mempool_free(mp);
626 eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
628 if (is_proc_primary())
629 return rte_eth_dev_set_mtu(port_id, mtu);
634 /* Forward function declarations */
635 static void setup_attached_port(portid_t pi);
636 static void check_all_ports_link_status(uint32_t port_mask);
637 static int eth_event_callback(portid_t port_id,
638 enum rte_eth_event_type type,
639 void *param, void *ret_param);
640 static void dev_event_callback(const char *device_name,
641 enum rte_dev_event_type type,
643 static void fill_xstats_display_info(void);
646 * Check if all the ports are started.
647  * If yes, return a positive value. If not, return zero.
649 static int all_ports_started(void);
652 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
653 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
656 /* Holds the registered mbuf dynamic flags names. */
657 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
661  * Helper function to check if a socket id is new (not yet discovered).
662  * If the socket id is not yet known, return a positive value; otherwise return zero.
665 new_socket_id(unsigned int socket_id)
669 for (i = 0; i < num_sockets; i++) {
670 if (socket_ids[i] == socket_id)
677 * Setup default configuration.
680 set_default_fwd_lcores_config(void)
684 unsigned int sock_num;
687 for (i = 0; i < RTE_MAX_LCORE; i++) {
688 if (!rte_lcore_is_enabled(i))
690 sock_num = rte_lcore_to_socket_id(i);
691 if (new_socket_id(sock_num)) {
692 if (num_sockets >= RTE_MAX_NUMA_NODES) {
693 rte_exit(EXIT_FAILURE,
694 "Total sockets greater than %u\n",
697 socket_ids[num_sockets++] = sock_num;
699 if (i == rte_get_main_lcore())
701 fwd_lcores_cpuids[nb_lc++] = i;
703 nb_lcores = (lcoreid_t) nb_lc;
704 nb_cfg_lcores = nb_lcores;
709 set_def_peer_eth_addrs(void)
713 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
714 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
715 peer_eth_addrs[i].addr_bytes[5] = i;
720 set_default_fwd_ports_config(void)
725 RTE_ETH_FOREACH_DEV(pt_id) {
726 fwd_ports_ids[i++] = pt_id;
728 /* Update sockets info according to the attached device */
729 int socket_id = rte_eth_dev_socket_id(pt_id);
730 if (socket_id >= 0 && new_socket_id(socket_id)) {
731 if (num_sockets >= RTE_MAX_NUMA_NODES) {
732 rte_exit(EXIT_FAILURE,
733 "Total sockets greater than %u\n",
736 socket_ids[num_sockets++] = socket_id;
740 nb_cfg_ports = nb_ports;
741 nb_fwd_ports = nb_ports;
745 set_def_fwd_config(void)
747 set_default_fwd_lcores_config();
748 set_def_peer_eth_addrs();
749 set_default_fwd_ports_config();
752 #ifndef RTE_EXEC_ENV_WINDOWS
753 /* extremely pessimistic estimation of memory required to create a mempool */
755 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
757 unsigned int n_pages, mbuf_per_pg, leftover;
758 uint64_t total_mem, mbuf_mem, obj_sz;
760 /* there is no good way to predict how much space the mempool will
761 * occupy because it will allocate chunks on the fly, and some of those
762 * will come from default DPDK memory while some will come from our
763 * external memory, so just assume 128MB will be enough for everyone.
765 uint64_t hdr_mem = 128 << 20;
767 /* account for possible non-contiguousness */
768 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
770 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
774 mbuf_per_pg = pgsz / obj_sz;
775 leftover = (nb_mbufs % mbuf_per_pg) > 0;
776 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
778 mbuf_mem = n_pages * pgsz;
780 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
782 if (total_mem > SIZE_MAX) {
783 TESTPMD_LOG(ERR, "Memory size too big\n");
786 *out = (size_t)total_mem;
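/*
 * Illustrative sizing with assumed numbers: for obj_sz = 2560,
 * pgsz = 2M and nb_mbufs = 10000, mbuf_per_pg = 2097152 / 2560 = 819,
 * leftover = 1 and n_pages = 10000 / 819 + 1 = 13, so mbuf_mem = 26M
 * and total_mem = RTE_ALIGN(128M + 26M, 2M) = 154M.
 */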
792 pagesz_flags(uint64_t page_sz)
794 /* as per the mmap() man page, huge page sizes are encoded as the log2
795  * of the page size, shifted left by MAP_HUGE_SHIFT
797 int log2 = rte_log2_u64(page_sz);
799 return (log2 << HUGE_SHIFT);
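/*
 * Worked example (assuming the Linux encoding): for 2M hugepages,
 * rte_log2_u64(RTE_PGSIZE_2M) = 21, so pagesz_flags() returns
 * 21 << 26, matching the kernel's MAP_HUGE_2MB value; 1G pages give
 * 30 << 26 (MAP_HUGE_1GB).
 */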
803 alloc_mem(size_t memsz, size_t pgsz, bool huge)
808 /* allocate anonymous hugepages */
809 flags = MAP_ANONYMOUS | MAP_PRIVATE;
811 flags |= HUGE_FLAG | pagesz_flags(pgsz);
813 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
814 if (addr == MAP_FAILED)
820 struct extmem_param {
824 rte_iova_t *iova_table;
825 unsigned int iova_table_len;
829 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
832 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
833 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
834 unsigned int cur_page, n_pages, pgsz_idx;
835 size_t mem_sz, cur_pgsz;
836 rte_iova_t *iovas = NULL;
840 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
841 /* skip anything that is too big */
842 if (pgsizes[pgsz_idx] > SIZE_MAX)
845 cur_pgsz = pgsizes[pgsz_idx];
847 /* if we were told not to allocate hugepages, override */
849 cur_pgsz = sysconf(_SC_PAGESIZE);
851 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
853 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
857 /* allocate our memory */
858 addr = alloc_mem(mem_sz, cur_pgsz, huge);
860 /* if we couldn't allocate memory with a specified page size,
861 * that doesn't mean we can't do it with other page sizes, so
867 /* store IOVA addresses for every page in this memory area */
868 n_pages = mem_sz / cur_pgsz;
870 iovas = malloc(sizeof(*iovas) * n_pages);
873 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
876 /* lock memory if it's not huge pages */
880 /* populate IOVA addresses */
881 for (cur_page = 0; cur_page < n_pages; cur_page++) {
886 offset = cur_pgsz * cur_page;
887 cur = RTE_PTR_ADD(addr, offset);
889 /* touch the page before getting its IOVA */
890 *(volatile char *)cur = 0;
892 iova = rte_mem_virt2iova(cur);
894 iovas[cur_page] = iova;
899 /* if we couldn't allocate anything */
905 param->pgsz = cur_pgsz;
906 param->iova_table = iovas;
907 param->iova_table_len = n_pages;
913 munmap(addr, mem_sz);
919 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
921 struct extmem_param param;
924 memset(&param, 0, sizeof(param));
926 /* check if our heap exists */
927 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
929 /* create our heap */
930 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
932 TESTPMD_LOG(ERR, "Cannot create heap\n");
937 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
939 TESTPMD_LOG(ERR, "Cannot create memory area\n");
943 /* we now have a valid memory area, so add it to heap */
944 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
945 param.addr, param.len, param.iova_table,
946 param.iova_table_len, param.pgsz);
948 /* when using VFIO, memory is automatically mapped for DMA by EAL */
950 /* not needed any more */
951 free(param.iova_table);
954 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
955 munmap(param.addr, param.len);
961 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
967 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
968 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
973 RTE_ETH_FOREACH_DEV(pid) {
974 struct rte_eth_dev_info dev_info;
976 ret = eth_dev_info_get_print_err(pid, &dev_info);
979 "unable to get device info for port %d on addr 0x%p,"
980 "mempool unmapping will not be performed\n",
985 ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
988 "unable to DMA unmap addr 0x%p "
990 memhdr->addr, dev_info.device->name);
993 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
996 "unable to un-register addr 0x%p\n", memhdr->addr);
1001 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
1002 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
1005 size_t page_size = sysconf(_SC_PAGESIZE);
1008 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
1012 "unable to register addr 0x%p\n", memhdr->addr);
1015 RTE_ETH_FOREACH_DEV(pid) {
1016 struct rte_eth_dev_info dev_info;
1018 ret = eth_dev_info_get_print_err(pid, &dev_info);
1021 "unable to get device info for port %d on addr 0x%p,"
1022 "mempool mapping will not be performed\n",
1026 ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
1029 "unable to DMA map addr 0x%p "
1031 memhdr->addr, dev_info.device->name);
1038 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
1039 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
1041 struct rte_pktmbuf_extmem *xmem;
1042 unsigned int ext_num, zone_num, elt_num;
1045 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
1046 elt_num = EXTBUF_ZONE_SIZE / elt_size;
1047 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
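/*
 * Example of the zone math above with assumed sizes: mbuf_sz = 2176
 * is already cache-line aligned, so elt_size = 2176 and
 * elt_num = 2096896 / 2176 = 963 buffers per zone; nb_mbufs = 131072
 * then rounds up to zone_num = 137 memzones.
 */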
1049 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
1051 TESTPMD_LOG(ERR, "Cannot allocate memory for "
1052 "external buffer descriptors\n");
1056 for (ext_num = 0; ext_num < zone_num; ext_num++) {
1057 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
1058 const struct rte_memzone *mz;
1059 char mz_name[RTE_MEMZONE_NAMESIZE];
1062 ret = snprintf(mz_name, sizeof(mz_name),
1063 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
1064 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
1065 errno = ENAMETOOLONG;
1069 mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
1071 RTE_MEMZONE_IOVA_CONTIG |
1073 RTE_MEMZONE_SIZE_HINT_ONLY);
1076 * The caller exits on external buffer creation
1077 * error, so there is no need to free memzones.
1083 xseg->buf_ptr = mz->addr;
1084 xseg->buf_iova = mz->iova;
1085 xseg->buf_len = EXTBUF_ZONE_SIZE;
1086 xseg->elt_size = elt_size;
1088 if (ext_num == 0 && xmem != NULL) {
1097 * Configuration initialisation done once at init time.
1099 static struct rte_mempool *
1100 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
1101 unsigned int socket_id, uint16_t size_idx)
1103 char pool_name[RTE_MEMPOOL_NAMESIZE];
1104 struct rte_mempool *rte_mp = NULL;
1105 #ifndef RTE_EXEC_ENV_WINDOWS
1108 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1110 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1111 if (!is_proc_primary()) {
1112 rte_mp = rte_mempool_lookup(pool_name);
1114 rte_exit(EXIT_FAILURE,
1115 "Get mbuf pool for socket %u failed: %s\n",
1116 socket_id, rte_strerror(rte_errno));
1121 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1122 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1124 switch (mp_alloc_type) {
1125 case MP_ALLOC_NATIVE:
1127 /* wrapper to rte_mempool_create() */
1128 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1129 rte_mbuf_best_mempool_ops());
1130 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1131 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1134 #ifndef RTE_EXEC_ENV_WINDOWS
1137 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1138 mb_size, (unsigned int) mb_mempool_cache,
1139 sizeof(struct rte_pktmbuf_pool_private),
1140 socket_id, mempool_flags);
1144 if (rte_mempool_populate_anon(rte_mp) == 0) {
1145 rte_mempool_free(rte_mp);
1149 rte_pktmbuf_pool_init(rte_mp, NULL);
1150 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1151 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1155 case MP_ALLOC_XMEM_HUGE:
1158 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1160 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1161 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1164 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1165 if (heap_socket < 0)
1166 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1168 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1169 rte_mbuf_best_mempool_ops());
1170 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1171 mb_mempool_cache, 0, mbuf_seg_size,
1178 struct rte_pktmbuf_extmem *ext_mem;
1179 unsigned int ext_num;
1181 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1182 socket_id, pool_name, &ext_mem);
1184 rte_exit(EXIT_FAILURE,
1185 "Can't create pinned data buffers\n");
1187 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1188 rte_mbuf_best_mempool_ops());
1189 rte_mp = rte_pktmbuf_pool_create_extbuf
1190 (pool_name, nb_mbuf, mb_mempool_cache,
1191 0, mbuf_seg_size, socket_id,
1198 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1202 #ifndef RTE_EXEC_ENV_WINDOWS
1205 if (rte_mp == NULL) {
1206 rte_exit(EXIT_FAILURE,
1207 "Creation of mbuf pool for socket %u failed: %s\n",
1208 socket_id, rte_strerror(rte_errno));
1209 } else if (verbose_level > 0) {
1210 rte_mempool_dump(stdout, rte_mp);
1216  * Check whether the given socket id is valid in NUMA mode.
1217  * If valid, return 0; otherwise return -1.
1220 check_socket_id(const unsigned int socket_id)
1222 static int warning_once = 0;
1224 if (new_socket_id(socket_id)) {
1225 if (!warning_once && numa_support)
1227 "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
1235 * Get the allowed maximum number of RX queues.
1236  * *pid returns the port id which has the minimal value of
1237  * max_rx_queues among all ports.
1240 get_allowed_max_nb_rxq(portid_t *pid)
1242 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1243 bool max_rxq_valid = false;
1245 struct rte_eth_dev_info dev_info;
1247 RTE_ETH_FOREACH_DEV(pi) {
1248 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1251 max_rxq_valid = true;
1252 if (dev_info.max_rx_queues < allowed_max_rxq) {
1253 allowed_max_rxq = dev_info.max_rx_queues;
1257 return max_rxq_valid ? allowed_max_rxq : 0;
1261  * Check whether the input rxq is valid.
1262  * The input rxq is valid if it is not greater than the maximum number
1263  * of RX queues supported by every port.
1264  * If valid, return 0; otherwise return -1.
1267 check_nb_rxq(queueid_t rxq)
1269 queueid_t allowed_max_rxq;
1272 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1273 if (rxq > allowed_max_rxq) {
1275 "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
1276 rxq, allowed_max_rxq, pid);
1283 * Get the allowed maximum number of TX queues.
1284  * *pid returns the port id which has the minimal value of
1285  * max_tx_queues among all ports.
1288 get_allowed_max_nb_txq(portid_t *pid)
1290 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1291 bool max_txq_valid = false;
1293 struct rte_eth_dev_info dev_info;
1295 RTE_ETH_FOREACH_DEV(pi) {
1296 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1299 max_txq_valid = true;
1300 if (dev_info.max_tx_queues < allowed_max_txq) {
1301 allowed_max_txq = dev_info.max_tx_queues;
1305 return max_txq_valid ? allowed_max_txq : 0;
1309  * Check whether the input txq is valid.
1310  * The input txq is valid if it is not greater than the maximum number
1311  * of TX queues supported by every port.
1312  * If valid, return 0; otherwise return -1.
1315 check_nb_txq(queueid_t txq)
1317 queueid_t allowed_max_txq;
1320 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1321 if (txq > allowed_max_txq) {
1323 "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
1324 txq, allowed_max_txq, pid);
1331 * Get the allowed maximum number of RXDs of every rx queue.
1332  * *pid returns the port id which has the minimal value of
1333  * max_rxd among all queues of all ports.
1336 get_allowed_max_nb_rxd(portid_t *pid)
1338 uint16_t allowed_max_rxd = UINT16_MAX;
1340 struct rte_eth_dev_info dev_info;
1342 RTE_ETH_FOREACH_DEV(pi) {
1343 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1346 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1347 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1351 return allowed_max_rxd;
1355 * Get the allowed minimal number of RXDs of every rx queue.
1356  * *pid returns the port id which has the maximal value of
1357  * min_rxd among all queues of all ports.
1360 get_allowed_min_nb_rxd(portid_t *pid)
1362 uint16_t allowed_min_rxd = 0;
1364 struct rte_eth_dev_info dev_info;
1366 RTE_ETH_FOREACH_DEV(pi) {
1367 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1370 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1371 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1376 return allowed_min_rxd;
1380  * Check whether the input rxd is valid.
1381  * The input rxd is valid if it is not greater than the maximum number
1382  * of RXDs of every Rx queue and not less than the
1383  * minimal number of RXDs of every Rx queue.
1384  * If valid, return 0; otherwise return -1.
1387 check_nb_rxd(queueid_t rxd)
1389 uint16_t allowed_max_rxd;
1390 uint16_t allowed_min_rxd;
1393 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1394 if (rxd > allowed_max_rxd) {
1396 "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
1397 rxd, allowed_max_rxd, pid);
1401 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1402 if (rxd < allowed_min_rxd) {
1404 "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
1405 rxd, allowed_min_rxd, pid);
1413  * Get the allowed maximum number of TXDs of every tx queue.
1414  * *pid returns the port id which has the minimal value of
1415  * max_txd among all tx queues.
1418 get_allowed_max_nb_txd(portid_t *pid)
1420 uint16_t allowed_max_txd = UINT16_MAX;
1422 struct rte_eth_dev_info dev_info;
1424 RTE_ETH_FOREACH_DEV(pi) {
1425 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1428 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1429 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1433 return allowed_max_txd;
1437  * Get the allowed minimal number of TXDs of every tx queue.
1438  * *pid returns the port id which has the maximal value of
1439  * min_txd among all tx queues.
1442 get_allowed_min_nb_txd(portid_t *pid)
1444 uint16_t allowed_min_txd = 0;
1446 struct rte_eth_dev_info dev_info;
1448 RTE_ETH_FOREACH_DEV(pi) {
1449 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1452 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1453 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1458 return allowed_min_txd;
1462  * Check whether the input txd is valid.
1463  * The input txd is valid if it is not greater than the maximum number
1464  * of TXDs of every Tx queue and not less than the minimal number of TXDs of every Tx queue.
1465  * If valid, return 0; otherwise return -1.
1468 check_nb_txd(queueid_t txd)
1470 uint16_t allowed_max_txd;
1471 uint16_t allowed_min_txd;
1474 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1475 if (txd > allowed_max_txd) {
1477 "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
1478 txd, allowed_max_txd, pid);
1482 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1483 if (txd < allowed_min_txd) {
1485 "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
1486 txd, allowed_min_txd, pid);
1494 * Get the allowed maximum number of hairpin queues.
1495  * *pid returns the port id which has the minimal value of
1496  * max_hairpin_queues among all ports.
1499 get_allowed_max_nb_hairpinq(portid_t *pid)
1501 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1503 struct rte_eth_hairpin_cap cap;
1505 RTE_ETH_FOREACH_DEV(pi) {
1506 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1510 if (cap.max_nb_queues < allowed_max_hairpinq) {
1511 allowed_max_hairpinq = cap.max_nb_queues;
1515 return allowed_max_hairpinq;
1519  * Check whether the input hairpinq is valid.
1520  * The input hairpinq is valid if it is not greater than the maximum number
1521  * of hairpin queues supported by every port.
1522  * If valid, return 0; otherwise return -1.
1525 check_nb_hairpinq(queueid_t hairpinq)
1527 queueid_t allowed_max_hairpinq;
1530 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1531 if (hairpinq > allowed_max_hairpinq) {
1533 "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
1534 hairpinq, allowed_max_hairpinq, pid);
1541 get_eth_overhead(struct rte_eth_dev_info *dev_info)
1543 uint32_t eth_overhead;
1545 if (dev_info->max_mtu != UINT16_MAX &&
1546 dev_info->max_rx_pktlen > dev_info->max_mtu)
1547 eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
1549 eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1551 return eth_overhead;
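/*
 * Example: a device reporting max_rx_pktlen = 1518 and max_mtu = 1500
 * yields an overhead of 18 bytes, i.e. the Ethernet header (14) plus
 * the CRC (4); devices without a usable max MTU fall back to the same
 * RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN constant.
 */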
1555 init_config_port_offloads(portid_t pid, uint32_t socket_id)
1557 struct rte_port *port = &ports[pid];
1561 eth_rx_metadata_negotiate_mp(pid);
1563 port->dev_conf.txmode = tx_mode;
1564 port->dev_conf.rxmode = rx_mode;
1566 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1568 rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1570 if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
1571 port->dev_conf.txmode.offloads &=
1572 ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1574 /* Apply Rx offloads configuration */
1575 for (i = 0; i < port->dev_info.max_rx_queues; i++)
1576 port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
1577 /* Apply Tx offloads configuration */
1578 for (i = 0; i < port->dev_info.max_tx_queues; i++)
1579 port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
1582 port->dev_conf.link_speeds = eth_link_speed;
1585 port->dev_conf.rxmode.mtu = max_rx_pkt_len -
1586 get_eth_overhead(&port->dev_info);
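/*
 * For example, with --max-pkt-len=9018 on a device whose overhead is
 * the default 18 bytes, the MTU configured here becomes 9000.
 */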
1588 /* set flag to initialize port/queue */
1589 port->need_reconfig = 1;
1590 port->need_reconfig_queues = 1;
1591 port->socket_id = socket_id;
1592 port->tx_metadata = 0;
1595  * Check the maximum number of segments per MTU and
1596  * update the mbuf data size accordingly.
1598 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1599 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1600 uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
1603 if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
1604 uint16_t data_size = (mtu + eth_overhead) /
1605 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1606 uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
1608 if (buffer_size > mbuf_data_size[0]) {
1609 mbuf_data_size[0] = buffer_size;
1610 TESTPMD_LOG(WARNING,
1611 "Configured mbuf size of the first segment %hu\n",
1622 struct rte_mempool *mbp;
1623 unsigned int nb_mbuf_per_pool;
1626 struct rte_gro_param gro_param;
1632 /* Configuration of logical cores. */
1633 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1634 sizeof(struct fwd_lcore *) * nb_lcores,
1635 RTE_CACHE_LINE_SIZE);
1636 if (fwd_lcores == NULL) {
1637 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1638 "failed\n", nb_lcores);
1640 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1641 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1642 sizeof(struct fwd_lcore),
1643 RTE_CACHE_LINE_SIZE);
1644 if (fwd_lcores[lc_id] == NULL) {
1645 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1648 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1651 RTE_ETH_FOREACH_DEV(pid) {
1655 socket_id = port_numa[pid];
1656 if (port_numa[pid] == NUMA_NO_CONFIG) {
1657 socket_id = rte_eth_dev_socket_id(pid);
1660 * if socket_id is invalid,
1661 * set to the first available socket.
1663 if (check_socket_id(socket_id) < 0)
1664 socket_id = socket_ids[0];
1667 socket_id = (socket_num == UMA_NO_CONFIG) ?
1670 /* Apply default TxRx configuration for all ports */
1671 init_config_port_offloads(pid, socket_id);
1674  * Create pools of mbufs.
1675  * If NUMA support is disabled, create a single pool of mbufs in
1676  * socket 0 memory by default.
1677  * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
1679 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1680 * nb_txd can be configured at run time.
1682 if (param_total_num_mbufs)
1683 nb_mbuf_per_pool = param_total_num_mbufs;
1685 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1686 (nb_lcores * mb_mempool_cache) +
1687 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1688 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1694 for (i = 0; i < num_sockets; i++)
1695 for (j = 0; j < mbuf_data_size_n; j++)
1696 mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1697 mbuf_pool_create(mbuf_data_size[j],
1703 for (i = 0; i < mbuf_data_size_n; i++)
1704 mempools[i] = mbuf_pool_create
1707 socket_num == UMA_NO_CONFIG ?
1714 gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1715 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
1718  * Record which mbuf pool each logical core uses, if needed.
1720 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1721 mbp = mbuf_pool_find(
1722 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1725 mbp = mbuf_pool_find(0, 0);
1726 fwd_lcores[lc_id]->mbp = mbp;
1728 /* initialize GSO context */
1729 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1730 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1731 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1732 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1734 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1741 /* create a gro context for each lcore */
1742 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1743 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1744 gro_param.max_item_per_flow = MAX_PKT_BURST;
1745 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1746 gro_param.socket_id = rte_lcore_to_socket_id(
1747 fwd_lcores_cpuids[lc_id]);
1748 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1749 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1750 rte_exit(EXIT_FAILURE,
1751 "rte_gro_ctx_create() failed\n");
1759 reconfig(portid_t new_port_id, unsigned socket_id)
1761 /* Reconfiguration of Ethernet ports. */
1762 init_config_port_offloads(new_port_id, socket_id);
1768 init_fwd_streams(void)
1771 struct rte_port *port;
1772 streamid_t sm_id, nb_fwd_streams_new;
1775 /* set the socket id according to whether NUMA is used */
1776 RTE_ETH_FOREACH_DEV(pid) {
1778 if (nb_rxq > port->dev_info.max_rx_queues) {
1780 "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
1781 nb_rxq, port->dev_info.max_rx_queues);
1784 if (nb_txq > port->dev_info.max_tx_queues) {
1786 "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
1787 nb_txq, port->dev_info.max_tx_queues);
1791 if (port_numa[pid] != NUMA_NO_CONFIG)
1792 port->socket_id = port_numa[pid];
1794 port->socket_id = rte_eth_dev_socket_id(pid);
1797 * if socket_id is invalid,
1798 * set to the first available socket.
1800 if (check_socket_id(port->socket_id) < 0)
1801 port->socket_id = socket_ids[0];
1805 if (socket_num == UMA_NO_CONFIG)
1806 port->socket_id = 0;
1808 port->socket_id = socket_num;
1812 q = RTE_MAX(nb_rxq, nb_txq);
1815 "Fail: Cannot allocate fwd streams as number of queues is 0\n");
1818 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1819 if (nb_fwd_streams_new == nb_fwd_streams)
1822 if (fwd_streams != NULL) {
1823 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1824 if (fwd_streams[sm_id] == NULL)
1826 rte_free(fwd_streams[sm_id]);
1827 fwd_streams[sm_id] = NULL;
1829 rte_free(fwd_streams);
1834 nb_fwd_streams = nb_fwd_streams_new;
1835 if (nb_fwd_streams) {
1836 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1837 sizeof(struct fwd_stream *) * nb_fwd_streams,
1838 RTE_CACHE_LINE_SIZE);
1839 if (fwd_streams == NULL)
1840 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1841 " (struct fwd_stream *)) failed\n",
1844 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1845 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1846 " struct fwd_stream", sizeof(struct fwd_stream),
1847 RTE_CACHE_LINE_SIZE);
1848 if (fwd_streams[sm_id] == NULL)
1849 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1850 "(struct fwd_stream) failed\n");
1858 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1860 uint64_t total_burst, sburst;
1862 uint64_t burst_stats[4];
1863 uint16_t pktnb_stats[4];
1865 int burst_percent[4], sburstp;
1869 * First compute the total number of packet bursts and the
1870 * two highest numbers of bursts of the same number of packets.
1872 memset(&burst_stats, 0x0, sizeof(burst_stats));
1873 memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
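/*
 * Illustration with assumed counters: a spread of 900 bursts of
 * 32 packets, 80 bursts of 16 packets and 20 bursts of 1 packet
 * gives total_burst = 1000, and the display reports roughly
 * "90% of 32 pkts + 8% of 16 pkts" with the remainder as "other".
 */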
1875 /* Show stats for 0 burst size always */
1876 total_burst = pbs->pkt_burst_spread[0];
1877 burst_stats[0] = pbs->pkt_burst_spread[0];
1880 /* Find the next 2 burst sizes with highest occurrences. */
1881 for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
1882 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1887 total_burst += nb_burst;
1889 if (nb_burst > burst_stats[1]) {
1890 burst_stats[2] = burst_stats[1];
1891 pktnb_stats[2] = pktnb_stats[1];
1892 burst_stats[1] = nb_burst;
1893 pktnb_stats[1] = nb_pkt;
1894 } else if (nb_burst > burst_stats[2]) {
1895 burst_stats[2] = nb_burst;
1896 pktnb_stats[2] = nb_pkt;
1899 if (total_burst == 0)
1902 printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1903 for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1905 printf("%d%% of other]\n", 100 - sburstp);
1909 sburst += burst_stats[i];
1910 if (sburst == total_burst) {
1911 printf("%d%% of %d pkts]\n",
1912 100 - sburstp, (int) pktnb_stats[i]);
1917 (double)burst_stats[i] / total_burst * 100;
1918 printf("%d%% of %d pkts + ",
1919 burst_percent[i], (int) pktnb_stats[i]);
1920 sburstp += burst_percent[i];
1925 fwd_stream_stats_display(streamid_t stream_id)
1927 struct fwd_stream *fs;
1928 static const char *fwd_top_stats_border = "-------";
1930 fs = fwd_streams[stream_id];
1931 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1932 (fs->fwd_dropped == 0))
1934 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1935 "TX Port=%2d/Queue=%2d %s\n",
1936 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1937 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1938 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1939 " TX-dropped: %-14"PRIu64,
1940 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1942 /* if checksum mode */
1943 if (cur_fwd_eng == &csum_fwd_engine) {
1944 printf(" RX- bad IP checksum: %-14"PRIu64
1945 " Rx- bad L4 checksum: %-14"PRIu64
1946 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1947 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1948 fs->rx_bad_outer_l4_csum);
1949 printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
1950 fs->rx_bad_outer_ip_csum);
1955 if (record_burst_stats) {
1956 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1957 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1962 fwd_stats_display(void)
1964 static const char *fwd_stats_border = "----------------------";
1965 static const char *acc_stats_border = "+++++++++++++++";
1967 struct fwd_stream *rx_stream;
1968 struct fwd_stream *tx_stream;
1969 uint64_t tx_dropped;
1970 uint64_t rx_bad_ip_csum;
1971 uint64_t rx_bad_l4_csum;
1972 uint64_t rx_bad_outer_l4_csum;
1973 uint64_t rx_bad_outer_ip_csum;
1974 } ports_stats[RTE_MAX_ETHPORTS];
1975 uint64_t total_rx_dropped = 0;
1976 uint64_t total_tx_dropped = 0;
1977 uint64_t total_rx_nombuf = 0;
1978 struct rte_eth_stats stats;
1979 uint64_t fwd_cycles = 0;
1980 uint64_t total_recv = 0;
1981 uint64_t total_xmit = 0;
1982 struct rte_port *port;
1987 memset(ports_stats, 0, sizeof(ports_stats));
1989 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1990 struct fwd_stream *fs = fwd_streams[sm_id];
1992 if (cur_fwd_config.nb_fwd_streams >
1993 cur_fwd_config.nb_fwd_ports) {
1994 fwd_stream_stats_display(sm_id);
1996 ports_stats[fs->tx_port].tx_stream = fs;
1997 ports_stats[fs->rx_port].rx_stream = fs;
2000 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
2002 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
2003 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
2004 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
2005 fs->rx_bad_outer_l4_csum;
2006 ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2007 fs->rx_bad_outer_ip_csum;
2009 if (record_core_cycles)
2010 fwd_cycles += fs->core_cycles;
2012 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2013 pt_id = fwd_ports_ids[i];
2014 port = &ports[pt_id];
2016 rte_eth_stats_get(pt_id, &stats);
2017 stats.ipackets -= port->stats.ipackets;
2018 stats.opackets -= port->stats.opackets;
2019 stats.ibytes -= port->stats.ibytes;
2020 stats.obytes -= port->stats.obytes;
2021 stats.imissed -= port->stats.imissed;
2022 stats.oerrors -= port->stats.oerrors;
2023 stats.rx_nombuf -= port->stats.rx_nombuf;
2025 total_recv += stats.ipackets;
2026 total_xmit += stats.opackets;
2027 total_rx_dropped += stats.imissed;
2028 total_tx_dropped += ports_stats[pt_id].tx_dropped;
2029 total_tx_dropped += stats.oerrors;
2030 total_rx_nombuf += stats.rx_nombuf;
2032 printf("\n %s Forward statistics for port %-2d %s\n",
2033 fwd_stats_border, pt_id, fwd_stats_border);
2035 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
2036 "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
2037 stats.ipackets + stats.imissed);
2039 if (cur_fwd_eng == &csum_fwd_engine) {
2040 printf(" Bad-ipcsum: %-14"PRIu64
2041 " Bad-l4csum: %-14"PRIu64
2042 "Bad-outer-l4csum: %-14"PRIu64"\n",
2043 ports_stats[pt_id].rx_bad_ip_csum,
2044 ports_stats[pt_id].rx_bad_l4_csum,
2045 ports_stats[pt_id].rx_bad_outer_l4_csum);
2046 printf(" Bad-outer-ipcsum: %-14"PRIu64"\n",
2047 ports_stats[pt_id].rx_bad_outer_ip_csum);
2049 if (stats.ierrors + stats.rx_nombuf > 0) {
2050 printf(" RX-error: %-"PRIu64"\n", stats.ierrors);
2051 printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
2054 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
2055 "TX-total: %-"PRIu64"\n",
2056 stats.opackets, ports_stats[pt_id].tx_dropped,
2057 stats.opackets + ports_stats[pt_id].tx_dropped);
2059 if (record_burst_stats) {
2060 if (ports_stats[pt_id].rx_stream)
2061 pkt_burst_stats_display("RX",
2062 &ports_stats[pt_id].rx_stream->rx_burst_stats);
2063 if (ports_stats[pt_id].tx_stream)
2064 pkt_burst_stats_display("TX",
2065 &ports_stats[pt_id].tx_stream->tx_burst_stats);
2068 printf(" %s--------------------------------%s\n",
2069 fwd_stats_border, fwd_stats_border);
2072 printf("\n %s Accumulated forward statistics for all ports"
2074 acc_stats_border, acc_stats_border);
2075 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
2077 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
2079 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
2080 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
2081 if (total_rx_nombuf > 0)
2082 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
2083 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
2085 acc_stats_border, acc_stats_border);
2086 if (record_core_cycles) {
2087 #define CYC_PER_MHZ 1E6
2088 if (total_recv > 0 || total_xmit > 0) {
2089 uint64_t total_pkts = 0;
2090 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
2091 strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
2092 total_pkts = total_xmit;
2094 total_pkts = total_recv;
2096 printf("\n CPU cycles/packet=%.2F (total cycles="
2097 "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
2099 (double) fwd_cycles / total_pkts,
2100 fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
2101 (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
2107 fwd_stats_reset(void)
2113 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2114 pt_id = fwd_ports_ids[i];
2115 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2117 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2118 struct fwd_stream *fs = fwd_streams[sm_id];
2122 fs->fwd_dropped = 0;
2123 fs->rx_bad_ip_csum = 0;
2124 fs->rx_bad_l4_csum = 0;
2125 fs->rx_bad_outer_l4_csum = 0;
2126 fs->rx_bad_outer_ip_csum = 0;
2128 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
2129 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2130 fs->core_cycles = 0;
2135 flush_fwd_rx_queues(void)
2137 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2144 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2145 uint64_t timer_period;
2147 if (num_procs > 1) {
2148 printf("multi-process does not support flushing fwd Rx queues; skipping.\n");
2152 /* convert to number of cycles */
2153 timer_period = rte_get_timer_hz(); /* 1 second timeout */
2155 for (j = 0; j < 2; j++) {
2156 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2157 for (rxq = 0; rxq < nb_rxq; rxq++) {
2158 port_id = fwd_ports_ids[rxp];
2160  * testpmd can get stuck in the do-while loop below
2161  * if rte_eth_rx_burst() always returns a nonzero
2162  * number of packets, so a timer is added to exit the
2163  * loop after a 1 second expiry.
2165 prev_tsc = rte_rdtsc();
2167 nb_rx = rte_eth_rx_burst(port_id, rxq,
2168 pkts_burst, MAX_PKT_BURST);
2169 for (i = 0; i < nb_rx; i++)
2170 rte_pktmbuf_free(pkts_burst[i]);
2172 cur_tsc = rte_rdtsc();
2173 diff_tsc = cur_tsc - prev_tsc;
2174 timer_tsc += diff_tsc;
2175 } while ((nb_rx > 0) &&
2176 (timer_tsc < timer_period));
2180 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2185 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2187 struct fwd_stream **fsm;
2190 #ifdef RTE_LIB_BITRATESTATS
2191 uint64_t tics_per_1sec;
2192 uint64_t tics_datum;
2193 uint64_t tics_current;
2194 uint16_t i, cnt_ports;
2196 cnt_ports = nb_ports;
2197 tics_datum = rte_rdtsc();
2198 tics_per_1sec = rte_get_timer_hz();
2200 fsm = &fwd_streams[fc->stream_idx];
2201 nb_fs = fc->stream_nb;
2203 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2204 (*pkt_fwd)(fsm[sm_id]);
2205 #ifdef RTE_LIB_BITRATESTATS
2206 if (bitrate_enabled != 0 &&
2207 bitrate_lcore_id == rte_lcore_id()) {
2208 tics_current = rte_rdtsc();
2209 if (tics_current - tics_datum >= tics_per_1sec) {
2210 /* Periodic bitrate calculation */
2211 for (i = 0; i < cnt_ports; i++)
2212 rte_stats_bitrate_calc(bitrate_data,
2214 tics_datum = tics_current;
2218 #ifdef RTE_LIB_LATENCYSTATS
2219 if (latencystats_enabled != 0 &&
2220 latencystats_lcore_id == rte_lcore_id())
2221 rte_latencystats_update();
2224 } while (! fc->stopped);
2228 start_pkt_forward_on_core(void *fwd_arg)
2230 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2231 cur_fwd_config.fwd_eng->packet_fwd);
2236 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2237 * Used to start communication flows in network loopback test configurations.
2240 run_one_txonly_burst_on_core(void *fwd_arg)
2242 struct fwd_lcore *fwd_lc;
2243 struct fwd_lcore tmp_lcore;
2245 fwd_lc = (struct fwd_lcore *) fwd_arg;
2246 tmp_lcore = *fwd_lc;
2247 tmp_lcore.stopped = 1;
2248 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2253 * Launch packet forwarding:
2254 * - Setup per-port forwarding context.
2255  * - Launch logical cores with their forwarding configuration.
2258 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2264 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2265 lc_id = fwd_lcores_cpuids[i];
2266 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2267 fwd_lcores[i]->stopped = 0;
2268 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2269 fwd_lcores[i], lc_id);
2272 "launch lcore %u failed - diag=%d\n",
2279 * Launch packet forwarding configuration.
2282 start_packet_forwarding(int with_tx_first)
2284 port_fwd_begin_t port_fwd_begin;
2285 port_fwd_end_t port_fwd_end;
2288 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2289 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
2291 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2292 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
2294 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2295 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2296 (!nb_rxq || !nb_txq))
2297 rte_exit(EXIT_FAILURE,
2298 "Either rxq or txq are 0, cannot use %s fwd mode\n",
2299 cur_fwd_eng->fwd_mode_name);
2301 if (all_ports_started() == 0) {
2302 fprintf(stderr, "Not all ports were started\n");
2305 if (test_done == 0) {
2306 fprintf(stderr, "Packet forwarding already started\n");
2312 pkt_fwd_config_display(&cur_fwd_config);
2313 if (!pkt_fwd_shared_rxq_check())
2316 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2317 if (port_fwd_begin != NULL) {
2318 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2319 if (port_fwd_begin(fwd_ports_ids[i])) {
2321 "Packet forwarding is not ready\n");
2327 if (with_tx_first) {
2328 port_fwd_begin = tx_only_engine.port_fwd_begin;
2329 if (port_fwd_begin != NULL) {
2330 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2331 if (port_fwd_begin(fwd_ports_ids[i])) {
2333 "Packet forwarding is not ready\n");
2343 flush_fwd_rx_queues();
2345 rxtx_config_display();
2348 if (with_tx_first) {
2349 while (with_tx_first--) {
2350 launch_packet_forwarding(
2351 run_one_txonly_burst_on_core);
2352 rte_eal_mp_wait_lcore();
2354 port_fwd_end = tx_only_engine.port_fwd_end;
2355 if (port_fwd_end != NULL) {
2356 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2357 (*port_fwd_end)(fwd_ports_ids[i]);
2360 launch_packet_forwarding(start_pkt_forward_on_core);
2364 stop_packet_forwarding(void)
2366 port_fwd_end_t port_fwd_end;
2372 fprintf(stderr, "Packet forwarding not started\n");
2375 printf("Telling cores to stop...");
2376 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2377 fwd_lcores[lc_id]->stopped = 1;
2378 printf("\nWaiting for lcores to finish...\n");
2379 rte_eal_mp_wait_lcore();
2380 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2381 if (port_fwd_end != NULL) {
2382 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2383 pt_id = fwd_ports_ids[i];
2384 (*port_fwd_end)(pt_id);
2388 fwd_stats_display();
2390 printf("\nDone.\n");
2395 dev_set_link_up(portid_t pid)
2397 if (rte_eth_dev_set_link_up(pid) < 0)
2398 fprintf(stderr, "\nSet link up fail.\n");
2402 dev_set_link_down(portid_t pid)
2404 if (rte_eth_dev_set_link_down(pid) < 0)
2405 fprintf(stderr, "\nSet link down fail.\n");
2409 all_ports_started(void)
2412 struct rte_port *port;
2414 RTE_ETH_FOREACH_DEV(pi) {
2416 /* Check if there is a port which is not started */
2417 if ((port->port_status != RTE_PORT_STARTED) &&
2418 (port->slave_flag == 0))
2422 /* All ports are started */
2427 port_is_stopped(portid_t port_id)
2429 struct rte_port *port = &ports[port_id];
2431 if ((port->port_status != RTE_PORT_STOPPED) &&
2432 (port->slave_flag == 0))
2438 all_ports_stopped(void)
2442 RTE_ETH_FOREACH_DEV(pi) {
2443 if (!port_is_stopped(pi))
2451 port_is_started(portid_t port_id)
2453 if (port_id_is_invalid(port_id, ENABLED_WARN))
2456 if (ports[port_id].port_status != RTE_PORT_STARTED)
2462 /* Configure the Rx and Tx hairpin queues for the selected port. */
2464 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2467 struct rte_eth_hairpin_conf hairpin_conf = {
2472 struct rte_port *port = &ports[pi];
2473 uint16_t peer_rx_port = pi;
2474 uint16_t peer_tx_port = pi;
2475 uint32_t manual = 1;
2476 uint32_t tx_exp = hairpin_mode & 0x10;
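/*
 * The hairpin_mode bits used below follow the documented --hairpin-mode
 * bitmask: bit 0 (0x01) chains the hairpin ports into a loop, bit 1
 * (0x02) pairs the ports two by two, and bit 4 (0x10) requests explicit
 * Tx flow rules. If no bit of the low nibble is set, each port is
 * hairpinned to itself.
 */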
2478 if (!(hairpin_mode & 0xf)) {
2482 } else if (hairpin_mode & 0x1) {
2483 peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2484 RTE_ETH_DEV_NO_OWNER);
2485 if (peer_tx_port >= RTE_MAX_ETHPORTS)
2486 peer_tx_port = rte_eth_find_next_owned_by(0,
2487 RTE_ETH_DEV_NO_OWNER);
2488 if (p_pi != RTE_MAX_ETHPORTS) {
2489 peer_rx_port = p_pi;
2493 /* Last port will be the peer RX port of the first. */
2494 RTE_ETH_FOREACH_DEV(next_pi)
2495 peer_rx_port = next_pi;
2498 } else if (hairpin_mode & 0x2) {
2500 peer_rx_port = p_pi;
2502 peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2503 RTE_ETH_DEV_NO_OWNER);
2504 if (peer_rx_port >= RTE_MAX_ETHPORTS)
2507 peer_tx_port = peer_rx_port;
2511 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2512 hairpin_conf.peers[0].port = peer_rx_port;
2513 hairpin_conf.peers[0].queue = i + nb_rxq;
2514 hairpin_conf.manual_bind = !!manual;
2515 hairpin_conf.tx_explicit = !!tx_exp;
2516 diag = rte_eth_tx_hairpin_queue_setup
2517 (pi, qi, nb_txd, &hairpin_conf);
2522 /* Failed to set up Tx hairpin queue, return */
2523 if (port->port_status == RTE_PORT_HANDLING)
2524 port->port_status = RTE_PORT_STOPPED;
2527 "Port %d can not be set back to stopped\n", pi);
2528 fprintf(stderr, "Fail to configure port %d hairpin queues\n",
2530 /* try to reconfigure queues next time */
2531 port->need_reconfig_queues = 1;
2534 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2535 hairpin_conf.peers[0].port = peer_tx_port;
2536 hairpin_conf.peers[0].queue = i + nb_txq;
2537 hairpin_conf.manual_bind = !!manual;
2538 hairpin_conf.tx_explicit = !!tx_exp;
2539 diag = rte_eth_rx_hairpin_queue_setup
2540 (pi, qi, nb_rxd, &hairpin_conf);
2545 /* Failed to set up Rx hairpin queue, return */
2546 if (port->port_status == RTE_PORT_HANDLING)
2547 port->port_status = RTE_PORT_STOPPED;
2550 "Port %d can not be set back to stopped\n", pi);
2551 fprintf(stderr, "Fail to configure port %d hairpin queues\n",
2553 /* try to reconfigure queues next time */
2554 port->need_reconfig_queues = 1;
2560 /* Configure an Rx queue, optionally using buffer split. */
2562 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2563 uint16_t nb_rx_desc, unsigned int socket_id,
2564 struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2566 union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2567 unsigned int i, mp_n;
2570 if (rx_pkt_nb_segs <= 1 ||
2571 (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2572 rx_conf->rx_seg = NULL;
2573 rx_conf->rx_nseg = 0;
2574 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2575 nb_rx_desc, socket_id,
2579 for (i = 0; i < rx_pkt_nb_segs; i++) {
2580 struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2581 struct rte_mempool *mpx;
2583 * Use the last valid pool for any segment whose index
2584 * exceeds the number of configured mempools.
2586 mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2587 mpx = mbuf_pool_find(socket_id, mp_n);
2588 /* A zero segment length means: use the mbuf data buffer size. */
2589 rx_seg->length = rx_pkt_seg_lengths[i] ?
2590 rx_pkt_seg_lengths[i] :
2591 mbuf_data_size[mp_n];
2592 rx_seg->offset = i < rx_pkt_nb_offs ?
2593 rx_pkt_seg_offsets[i] : 0;
2594 rx_seg->mp = mpx ? mpx : mp;
2596 rx_conf->rx_nseg = rx_pkt_nb_segs;
2597 rx_conf->rx_seg = rx_useg;
2598 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2599 socket_id, rx_conf, NULL);
2600 rx_conf->rx_seg = NULL;
2601 rx_conf->rx_nseg = 0;
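/*
 * A usage sketch (option values are illustrative): buffer split is
 * typically exercised with several mempools and per-segment lengths,
 * e.g.
 *
 *   dpdk-testpmd -- -i --mbuf-size=128,2048
 *   testpmd> port stop all
 *   testpmd> set rxpkts 64,0
 *   testpmd> port config 0 rx_offload buffer_split on
 *   testpmd> port start all
 *
 * so the first 64 bytes of each packet land in the small pool and the
 * rest (length 0 = "use the mbuf data size") in the large one.
 */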
2606 alloc_xstats_display_info(portid_t pi)
2608 uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
2609 uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
2610 uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
2612 if (xstats_display_num == 0)
2615 *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
2616 if (*ids_supp == NULL)
2619 *prev_values = calloc(xstats_display_num,
2620 sizeof(**prev_values));
2621 if (*prev_values == NULL)
2622 goto fail_prev_values;
2624 *curr_values = calloc(xstats_display_num,
2625 sizeof(**curr_values));
2626 if (*curr_values == NULL)
2627 goto fail_curr_values;
2629 ports[pi].xstats_info.allocated = true;
2642 free_xstats_display_info(portid_t pi)
2644 if (!ports[pi].xstats_info.allocated)
2646 free(ports[pi].xstats_info.ids_supp);
2647 free(ports[pi].xstats_info.prev_values);
2648 free(ports[pi].xstats_info.curr_values);
2649 ports[pi].xstats_info.allocated = false;
2652 /** Fill helper structures for specified port to show extended statistics. */
2654 fill_xstats_display_info_for_port(portid_t pi)
2656 unsigned int stat, stat_supp;
2657 const char *xstat_name;
2658 struct rte_port *port;
2662 if (xstats_display_num == 0)
2665 if (pi == (portid_t)RTE_PORT_ALL) {
2666 fill_xstats_display_info();
2671 if (port->port_status != RTE_PORT_STARTED)
2674 if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
2675 rte_exit(EXIT_FAILURE,
2676 "Failed to allocate xstats display memory\n");
2678 ids_supp = port->xstats_info.ids_supp;
2679 for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
2680 xstat_name = xstats_display[stat].name;
2681 rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
2682 ids_supp + stat_supp);
2684 fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n",
2685 xstat_name, pi, stat);
2691 port->xstats_info.ids_supp_sz = stat_supp;
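/*
 * Usage note (assuming the documented option): the xstats filtered here
 * come from --display-xstats, e.g.
 *
 *   dpdk-testpmd -- -i --display-xstats=rx_good_packets,rx_errors
 *
 * and only the counters actually found on the port are kept in ids_supp.
 */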
2694 /** Fill helper structures for all ports to show extended statistics. */
2696 fill_xstats_display_info(void)
2700 if (xstats_display_num == 0)
2703 RTE_ETH_FOREACH_DEV(pi)
2704 fill_xstats_display_info_for_port(pi);
2708 start_port(portid_t pid)
2710 int diag, need_check_link_status = -1;
2712 portid_t p_pi = RTE_MAX_ETHPORTS;
2713 portid_t pl[RTE_MAX_ETHPORTS];
2714 portid_t peer_pl[RTE_MAX_ETHPORTS];
2715 uint16_t cnt_pi = 0;
2716 uint16_t cfg_pi = 0;
2719 struct rte_port *port;
2720 struct rte_eth_hairpin_cap cap;
2722 if (port_id_is_invalid(pid, ENABLED_WARN))
2725 RTE_ETH_FOREACH_DEV(pi) {
2726 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2729 need_check_link_status = 0;
2731 if (port->port_status == RTE_PORT_STOPPED)
2732 port->port_status = RTE_PORT_HANDLING;
2734 fprintf(stderr, "Port %d is now not stopped\n", pi);
2738 if (port->need_reconfig > 0) {
2739 struct rte_eth_conf dev_conf;
2742 port->need_reconfig = 0;
2744 if (flow_isolate_all) {
2745 int ret = port_flow_isolate(pi, 1);
2748 "Failed to apply isolated mode on port %d\n",
2753 configure_rxtx_dump_callbacks(0);
2754 printf("Configuring Port %d (socket %u)\n", pi,
2756 if (nb_hairpinq > 0 &&
2757 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2759 "Port %d doesn't support hairpin queues\n",
2764 /* configure port */
2765 diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
2766 nb_txq + nb_hairpinq,
2769 if (port->port_status == RTE_PORT_HANDLING)
2770 port->port_status = RTE_PORT_STOPPED;
2773 "Port %d can not be set back to stopped\n",
2775 fprintf(stderr, "Fail to configure port %d\n",
2777 /* try to reconfigure port next time */
2778 port->need_reconfig = 1;
2781 /* get the device configuration */
2783 eth_dev_conf_get_print_err(pi, &dev_conf)) {
2785 "port %d can not get device configuration\n",
2789 /* Apply Rx offloads configuration */
2790 if (dev_conf.rxmode.offloads !=
2791 port->dev_conf.rxmode.offloads) {
2792 port->dev_conf.rxmode.offloads |=
2793 dev_conf.rxmode.offloads;
2795 k < port->dev_info.max_rx_queues;
2797 port->rx_conf[k].offloads |=
2798 dev_conf.rxmode.offloads;
2800 /* Apply Tx offloads configuration */
2801 if (dev_conf.txmode.offloads !=
2802 port->dev_conf.txmode.offloads) {
2803 port->dev_conf.txmode.offloads |=
2804 dev_conf.txmode.offloads;
2806 k < port->dev_info.max_tx_queues;
2808 port->tx_conf[k].offloads |=
2809 dev_conf.txmode.offloads;
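/*
 * Note: the two blocks above only ever OR offload bits in. A PMD may
 * silently turn on additional offloads during configure (RSS hash, for
 * instance), and the cached port and per-queue copies must not report
 * less than what the device actually applied; bits are therefore added
 * here but never cleared.
 */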
2812 if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2813 port->need_reconfig_queues = 0;
2814 /* setup tx queues */
2815 for (qi = 0; qi < nb_txq; qi++) {
2816 if ((numa_support) &&
2817 (txring_numa[pi] != NUMA_NO_CONFIG))
2818 diag = rte_eth_tx_queue_setup(pi, qi,
2819 port->nb_tx_desc[qi],
2821 &(port->tx_conf[qi]));
2823 diag = rte_eth_tx_queue_setup(pi, qi,
2824 port->nb_tx_desc[qi],
2826 &(port->tx_conf[qi]));
2831 /* Failed to set up Tx queue, return */
2832 if (port->port_status == RTE_PORT_HANDLING)
2833 port->port_status = RTE_PORT_STOPPED;
2836 "Port %d can not be set back to stopped\n",
2839 "Fail to configure port %d tx queues\n",
2841 /* try to reconfigure queues next time */
2842 port->need_reconfig_queues = 1;
2845 for (qi = 0; qi < nb_rxq; qi++) {
2846 /* setup rx queues */
2847 if ((numa_support) &&
2848 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2849 struct rte_mempool *mp =
2851 (rxring_numa[pi], 0);
2854 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2859 diag = rx_queue_setup(pi, qi,
2860 port->nb_rx_desc[qi],
2862 &(port->rx_conf[qi]),
2865 struct rte_mempool *mp =
2867 (port->socket_id, 0);
2870 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2874 diag = rx_queue_setup(pi, qi,
2875 port->nb_rx_desc[qi],
2877 &(port->rx_conf[qi]),
2883 /* Failed to set up Rx queue, return */
2884 if (port->port_status == RTE_PORT_HANDLING)
2885 port->port_status = RTE_PORT_STOPPED;
2888 "Port %d can not be set back to stopped\n",
2891 "Fail to configure port %d rx queues\n",
2893 /* try to reconfigure queues next time */
2894 port->need_reconfig_queues = 1;
2897 /* setup hairpin queues */
2898 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
2901 configure_rxtx_dump_callbacks(verbose_level);
2903 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2907 "Port %d: Failed to disable Ptype parsing\n",
2915 diag = eth_dev_start_mp(pi);
2917 fprintf(stderr, "Fail to start port %d: %s\n",
2918 pi, rte_strerror(-diag));
2920 /* Failed to start the port; set it back to stopped */
2921 if (port->port_status == RTE_PORT_HANDLING)
2922 port->port_status = RTE_PORT_STOPPED;
2925 "Port %d can not be set back to stopped\n",
2930 if (port->port_status == RTE_PORT_HANDLING)
2931 port->port_status = RTE_PORT_STARTED;
2933 fprintf(stderr, "Port %d can not be set into started\n",
2936 if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
2937 printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
2938 RTE_ETHER_ADDR_BYTES(&port->eth_addr));
2940 /* at least one port was started, so the link status needs checking */
2941 need_check_link_status = 1;
2946 if (need_check_link_status == 1 && !no_link_check)
2947 check_all_ports_link_status(RTE_PORT_ALL);
2948 else if (need_check_link_status == 0)
2949 fprintf(stderr, "Please stop the ports first\n");
2951 if (hairpin_mode & 0xf) {
2955 /* bind all started hairpin ports */
2956 for (i = 0; i < cfg_pi; i++) {
2958 /* bind current Tx to all peer Rx */
2959 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2960 RTE_MAX_ETHPORTS, 1);
2963 for (j = 0; j < peer_pi; j++) {
2964 if (!port_is_started(peer_pl[j]))
2966 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2969 "Error during binding hairpin Tx port %u to %u: %s\n",
2971 rte_strerror(-diag));
2975 /* bind all peer Tx to current Rx */
2976 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2977 RTE_MAX_ETHPORTS, 0);
2980 for (j = 0; j < peer_pi; j++) {
2981 if (!port_is_started(peer_pl[j]))
2983 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
2986 "Error during binding hairpin Tx port %u to %u: %s\n",
2988 rte_strerror(-diag));
2995 fill_xstats_display_info_for_port(pid);
3002 stop_port(portid_t pid)
3005 struct rte_port *port;
3006 int need_check_link_status = 0;
3007 portid_t peer_pl[RTE_MAX_ETHPORTS];
3010 if (port_id_is_invalid(pid, ENABLED_WARN))
3013 printf("Stopping ports...\n");
3015 RTE_ETH_FOREACH_DEV(pi) {
3016 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3019 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3021 "Please remove port %d from forwarding configuration.\n",
3026 if (port_is_bonding_slave(pi)) {
3028 "Please remove port %d from bonded device.\n",
3034 if (port->port_status == RTE_PORT_STARTED)
3035 port->port_status = RTE_PORT_HANDLING;
3039 if (hairpin_mode & 0xf) {
3042 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
3043 /* unbind all peer Tx from current Rx */
3044 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3045 RTE_MAX_ETHPORTS, 0);
3048 for (j = 0; j < peer_pi; j++) {
3049 if (!port_is_started(peer_pl[j]))
3051 rte_eth_hairpin_unbind(peer_pl[j], pi);
3055 if (port->flow_list)
3056 port_flow_flush(pi);
3058 if (eth_dev_stop_mp(pi) != 0)
3059 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3062 if (port->port_status == RTE_PORT_HANDLING)
3063 port->port_status = RTE_PORT_STOPPED;
3065 fprintf(stderr, "Port %d can not be set into stopped\n",
3067 need_check_link_status = 1;
3069 if (need_check_link_status && !no_link_check)
3070 check_all_ports_link_status(RTE_PORT_ALL);
3076 remove_invalid_ports_in(portid_t *array, portid_t *total)
3079 portid_t new_total = 0;
3081 for (i = 0; i < *total; i++)
3082 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
3083 array[new_total] = array[i];
3090 remove_invalid_ports(void)
3092 remove_invalid_ports_in(ports_ids, &nb_ports);
3093 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
3094 nb_cfg_ports = nb_fwd_ports;
3098 close_port(portid_t pid)
3101 struct rte_port *port;
3103 if (port_id_is_invalid(pid, ENABLED_WARN))
3106 printf("Closing ports...\n");
3108 RTE_ETH_FOREACH_DEV(pi) {
3109 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3112 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3114 "Please remove port %d from forwarding configuration.\n",
3119 if (port_is_bonding_slave(pi)) {
3121 "Please remove port %d from bonded device.\n",
3127 if (port->port_status == RTE_PORT_CLOSED) {
3128 fprintf(stderr, "Port %d is already closed\n", pi);
3132 if (is_proc_primary()) {
3133 port_flow_flush(pi);
3134 port_flex_item_flush(pi);
3135 rte_eth_dev_close(pi);
3138 free_xstats_display_info(pi);
3141 remove_invalid_ports();
3146 reset_port(portid_t pid)
3150 struct rte_port *port;
3152 if (port_id_is_invalid(pid, ENABLED_WARN))
3155 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
3156 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
3158 "Can not reset port(s), please stop port(s) first.\n");
3162 printf("Resetting ports...\n");
3164 RTE_ETH_FOREACH_DEV(pi) {
3165 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3168 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3170 "Please remove port %d from forwarding configuration.\n",
3175 if (port_is_bonding_slave(pi)) {
3177 "Please remove port %d from bonded device.\n",
3182 diag = rte_eth_dev_reset(pi);
3185 port->need_reconfig = 1;
3186 port->need_reconfig_queues = 1;
3188 fprintf(stderr, "Failed to reset port %d. diag=%d\n",
3197 attach_port(char *identifier)
3200 struct rte_dev_iterator iterator;
3202 printf("Attaching a new port...\n");
3204 if (identifier == NULL) {
3205 fprintf(stderr, "Invalid parameters are specified\n");
3209 if (rte_dev_probe(identifier) < 0) {
3210 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3214 /* first attach mode: event */
3215 if (setup_on_probe_event) {
3216 /* new ports are detected on RTE_ETH_EVENT_NEW event */
3217 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
3218 if (ports[pi].port_status == RTE_PORT_HANDLING &&
3219 ports[pi].need_setup != 0)
3220 setup_attached_port(pi);
3224 /* second attach mode: iterator */
3225 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
3226 /* setup ports matching the devargs used for probing */
3227 if (port_is_forwarding(pi))
3228 continue; /* port was already attached before */
3229 setup_attached_port(pi);
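/*
 * For reference (assuming the documented hotplug command), runtime
 * attach is driven from the CLI as:
 *
 *   testpmd> port attach 0000:03:00.0
 *
 * which lands here once the device has been probed successfully.
 */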
3234 setup_attached_port(portid_t pi)
3236 unsigned int socket_id;
3239 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
3240 /* if socket_id is invalid, set to the first available socket. */
3241 if (check_socket_id(socket_id) < 0)
3242 socket_id = socket_ids[0];
3243 reconfig(pi, socket_id);
3244 ret = rte_eth_promiscuous_enable(pi);
3247 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
3248 pi, rte_strerror(-ret));
3250 ports_ids[nb_ports++] = pi;
3251 fwd_ports_ids[nb_fwd_ports++] = pi;
3252 nb_cfg_ports = nb_fwd_ports;
3253 ports[pi].need_setup = 0;
3254 ports[pi].port_status = RTE_PORT_STOPPED;
3256 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
3261 detach_device(struct rte_device *dev)
3266 fprintf(stderr, "Device already removed\n");
3270 printf("Removing a device...\n");
3272 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3273 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3274 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3275 fprintf(stderr, "Port %u not stopped\n",
3279 port_flow_flush(sibling);
3283 if (rte_dev_remove(dev) < 0) {
3284 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3287 remove_invalid_ports();
3289 printf("Device is detached\n");
3290 printf("Now total ports is %d\n", nb_ports);
3296 detach_port_device(portid_t port_id)
3299 struct rte_eth_dev_info dev_info;
3301 if (port_id_is_invalid(port_id, ENABLED_WARN))
3304 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3305 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3306 fprintf(stderr, "Port not stopped\n");
3309 fprintf(stderr, "Port was not closed\n");
3312 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3315 "Failed to get device info for port %d, not detaching\n",
3319 detach_device(dev_info.device);
3323 detach_devargs(char *identifier)
3325 struct rte_dev_iterator iterator;
3326 struct rte_devargs da;
3329 printf("Removing a device...\n");
3331 memset(&da, 0, sizeof(da));
3332 if (rte_devargs_parsef(&da, "%s", identifier)) {
3333 fprintf(stderr, "cannot parse identifier\n");
3337 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
3338 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3339 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3340 fprintf(stderr, "Port %u not stopped\n",
3342 rte_eth_iterator_cleanup(&iterator);
3343 rte_devargs_reset(&da);
3346 port_flow_flush(port_id);
3350 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3351 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3352 da.name, da.bus->name);
3353 rte_devargs_reset(&da);
3357 remove_invalid_ports();
3359 printf("Device %s is detached\n", identifier);
3360 printf("Now total ports is %d\n", nb_ports);
3362 rte_devargs_reset(&da);
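/*
 * The matching CLI entry point (as documented) is "device detach
 * <identifier>", which removes every port created from that device;
 * "port detach <port_id>" above instead resolves the device from a
 * single port before detaching it.
 */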
3373 stop_packet_forwarding();
3375 #ifndef RTE_EXEC_ENV_WINDOWS
3376 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3378 if (mp_alloc_type == MP_ALLOC_ANON)
3379 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3384 if (ports != NULL) {
3386 RTE_ETH_FOREACH_DEV(pt_id) {
3387 printf("\nStopping port %d...\n", pt_id);
3391 RTE_ETH_FOREACH_DEV(pt_id) {
3392 printf("\nShutting down port %d...\n", pt_id);
3399 ret = rte_dev_event_monitor_stop();
3402 "fail to stop device event monitor.");
3406 ret = rte_dev_event_callback_unregister(NULL,
3407 dev_event_callback, NULL);
3410 "fail to unregister device event callback.\n");
3414 ret = rte_dev_hotplug_handle_disable();
3417 "fail to disable hotplug handling.\n");
3421 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3423 mempool_free_mp(mempools[i]);
3425 free(xstats_display);
3427 printf("\nBye...\n");
3430 typedef void (*cmd_func_t)(void);
3431 struct pmd_test_command {
3432 const char *cmd_name;
3433 cmd_func_t cmd_func;
3436 /* Check the link status of all ports for up to 9 s, then print the final status */
3438 check_all_ports_link_status(uint32_t port_mask)
3440 #define CHECK_INTERVAL 100 /* 100ms */
3441 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3443 uint8_t count, all_ports_up, print_flag = 0;
3444 struct rte_eth_link link;
3446 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3448 printf("Checking link statuses...\n");
3450 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3452 RTE_ETH_FOREACH_DEV(portid) {
3453 if ((port_mask & (1 << portid)) == 0)
3455 memset(&link, 0, sizeof(link));
3456 ret = rte_eth_link_get_nowait(portid, &link);
3459 if (print_flag == 1)
3461 "Port %u link get failed: %s\n",
3462 portid, rte_strerror(-ret));
3465 /* print link status if flag set */
3466 if (print_flag == 1) {
3467 rte_eth_link_to_str(link_status,
3468 sizeof(link_status), &link);
3469 printf("Port %d %s\n", portid, link_status);
3472 /* clear all_ports_up flag if any link down */
3473 if (link.link_status == RTE_ETH_LINK_DOWN) {
3478 /* once all link statuses have been printed, stop polling */
3479 if (print_flag == 1)
3482 if (all_ports_up == 0) {
3484 rte_delay_ms(CHECK_INTERVAL);
3487 /* set print_flag once all ports are up or the timeout expires */
3488 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3498 rmv_port_callback(void *arg)
3500 int need_to_start = 0;
3501 int org_no_link_check = no_link_check;
3502 portid_t port_id = (intptr_t)arg;
3503 struct rte_eth_dev_info dev_info;
3506 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3508 if (!test_done && port_is_forwarding(port_id)) {
3510 stop_packet_forwarding();
3514 no_link_check = org_no_link_check;
3516 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3519 "Failed to get device info for port %d, not detaching\n",
3522 struct rte_device *device = dev_info.device;
3523 close_port(port_id);
3524 detach_device(device); /* might be already removed or have more ports */
3527 start_packet_forwarding(0);
3530 /* This function is used by the interrupt thread */
3532 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3535 RTE_SET_USED(param);
3536 RTE_SET_USED(ret_param);
3538 if (type >= RTE_ETH_EVENT_MAX) {
3540 "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3541 port_id, __func__, type);
3543 } else if (event_print_mask & (UINT32_C(1) << type)) {
3544 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3545 eth_event_desc[type]);
3550 case RTE_ETH_EVENT_NEW:
3551 ports[port_id].need_setup = 1;
3552 ports[port_id].port_status = RTE_PORT_HANDLING;
3554 case RTE_ETH_EVENT_INTR_RMV:
3555 if (port_id_is_invalid(port_id, DISABLED_WARN))
3557 if (rte_eal_alarm_set(100000,
3558 rmv_port_callback, (void *)(intptr_t)port_id))
3560 "Could not set up deferred device removal\n");
3562 case RTE_ETH_EVENT_DESTROY:
3563 ports[port_id].port_status = RTE_PORT_CLOSED;
3564 printf("Port %u is closed\n", port_id);
3573 register_eth_event_callback(void)
3576 enum rte_eth_event_type event;
3578 for (event = RTE_ETH_EVENT_UNKNOWN;
3579 event < RTE_ETH_EVENT_MAX; event++) {
3580 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3585 TESTPMD_LOG(ERR, "Failed to register callback for "
3586 "%s event\n", eth_event_desc[event]);
3594 /* This function is used by the interrupt thread */
3596 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3597 __rte_unused void *arg)
3602 if (type >= RTE_DEV_EVENT_MAX) {
3603 fprintf(stderr, "%s called upon invalid event %d\n",
3609 case RTE_DEV_EVENT_REMOVE:
3610 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3612 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3614 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
3619 * Because the user's callback is invoked from the EAL interrupt
3620 * callback, the interrupt callback must finish before it can be
3621 * unregistered when the device is being detached. So return from
3622 * this callback quickly and detach the device through a deferred
3623 * removal instead. This is a workaround; once device detaching is
3624 * moved into the EAL, the deferred removal could be replaced.
3627 if (rte_eal_alarm_set(100000,
3628 rmv_port_callback, (void *)(intptr_t)port_id))
3630 "Could not set up deferred device removal\n");
3632 case RTE_DEV_EVENT_ADD:
3633 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3635 /* TODO: after the kernel driver binding finishes,
3636 * begin to attach the port.
3645 rxtx_port_config(portid_t pid)
3649 struct rte_port *port = &ports[pid];
3651 for (qid = 0; qid < nb_rxq; qid++) {
3652 offloads = port->rx_conf[qid].offloads;
3653 port->rx_conf[qid] = port->dev_info.default_rxconf;
3655 if (rxq_share > 0 &&
3656 (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
3657 /* Non-zero share group to enable RxQ share. */
3658 port->rx_conf[qid].share_group = pid / rxq_share + 1;
3659 port->rx_conf[qid].share_qid = qid; /* Equal mapping. */
3663 port->rx_conf[qid].offloads = offloads;
3665 /* Check if any Rx parameters have been passed */
3666 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3667 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3669 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3670 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3672 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3673 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3675 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3676 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3678 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3679 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3681 port->nb_rx_desc[qid] = nb_rxd;
3684 for (qid = 0; qid < nb_txq; qid++) {
3685 offloads = port->tx_conf[qid].offloads;
3686 port->tx_conf[qid] = port->dev_info.default_txconf;
3688 port->tx_conf[qid].offloads = offloads;
3690 /* Check if any Tx parameters have been passed */
3691 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3692 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3694 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3695 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3697 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3698 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3700 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3701 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3703 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3704 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3706 port->nb_tx_desc[qid] = nb_txd;
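/*
 * The RTE_PMD_PARAM_UNSET checks above map one-to-one onto testpmd's
 * command-line overrides (per its documentation): --rxpt/--rxht/--rxwt
 * and --txpt/--txht/--txwt for the prefetch, host and write-back
 * thresholds, --rxfreet/--txfreet for the free thresholds, and --txrst
 * for the Tx RS threshold; anything left unset keeps the PMD default
 * from default_rxconf/default_txconf.
 */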
3711 * Helper function to set MTU from frame size
3713 * port->dev_info should be set before calling this function.
3715 * return 0 on success, negative on error
3718 update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
3720 struct rte_port *port = &ports[portid];
3721 uint32_t eth_overhead;
3722 uint16_t mtu, new_mtu;
3724 eth_overhead = get_eth_overhead(&port->dev_info);
3726 if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
3727 printf("Failed to get MTU for port %u\n", portid);
3731 new_mtu = max_rx_pktlen - eth_overhead;
3736 if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
3738 "Failed to set MTU to %u for port %u\n",
3743 port->dev_conf.rxmode.mtu = new_mtu;
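/*
 * Worked example (plain Ethernet, no VLAN): with an L2 overhead of
 * 18 bytes (14-byte header + 4-byte CRC), max_rx_pktlen = 1518 gives
 * new_mtu = 1518 - 18 = 1500, the classic Ethernet MTU.
 */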
3749 init_port_config(void)
3752 struct rte_port *port;
3755 RTE_ETH_FOREACH_DEV(pid) {
3757 port->dev_conf.fdir_conf = fdir_conf;
3759 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3764 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3765 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3766 rss_hf & port->dev_info.flow_type_rss_offloads;
3768 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3769 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3772 if (port->dcb_flag == 0) {
3773 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
3774 port->dev_conf.rxmode.mq_mode =
3775 (enum rte_eth_rx_mq_mode)
3776 (rx_mq_mode & RTE_ETH_MQ_RX_RSS);
3778 port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
3779 port->dev_conf.rxmode.offloads &=
3780 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3783 i < port->dev_info.nb_rx_queues;
3785 port->rx_conf[i].offloads &=
3786 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3790 rxtx_port_config(pid);
3792 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3796 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3797 rte_pmd_ixgbe_bypass_init(pid);
3800 if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
3801 port->dev_conf.intr_conf.lsc = 1;
3802 if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3803 port->dev_conf.intr_conf.rmv = 1;
3807 void set_port_slave_flag(portid_t slave_pid)
3809 struct rte_port *port;
3811 port = &ports[slave_pid];
3812 port->slave_flag = 1;
3815 void clear_port_slave_flag(portid_t slave_pid)
3817 struct rte_port *port;
3819 port = &ports[slave_pid];
3820 port->slave_flag = 0;
3823 uint8_t port_is_bonding_slave(portid_t slave_pid)
3825 struct rte_port *port;
3826 struct rte_eth_dev_info dev_info;
3829 port = &ports[slave_pid];
3830 ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
3833 "Failed to get device info for port id %d,"
3834 "cannot determine if the port is a bonded slave",
3838 if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3843 const uint16_t vlan_tags[] = {
3844 0, 1, 2, 3, 4, 5, 6, 7,
3845 8, 9, 10, 11, 12, 13, 14, 15,
3846 16, 17, 18, 19, 20, 21, 22, 23,
3847 24, 25, 26, 27, 28, 29, 30, 31
3851 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3852 enum dcb_mode_enable dcb_mode,
3853 enum rte_eth_nb_tcs num_tcs,
3858 struct rte_eth_rss_conf rss_conf;
3861 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3862 * given above, and the number of traffic classes available for use.
3864 if (dcb_mode == DCB_VT_ENABLED) {
3865 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3866 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3867 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3868 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3870 /* VMDQ+DCB RX and TX configurations */
3871 vmdq_rx_conf->enable_default_pool = 0;
3872 vmdq_rx_conf->default_pool = 0;
3873 vmdq_rx_conf->nb_queue_pools =
3874 (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
3875 vmdq_tx_conf->nb_queue_pools =
3876 (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
3878 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3879 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3880 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3881 vmdq_rx_conf->pool_map[i].pools =
3882 1 << (i % vmdq_rx_conf->nb_queue_pools);
3884 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3885 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3886 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3889 /* set DCB mode of RX and TX of multiple queues */
3890 eth_conf->rxmode.mq_mode =
3891 (enum rte_eth_rx_mq_mode)
3892 (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
3893 eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
3895 struct rte_eth_dcb_rx_conf *rx_conf =
3896 &eth_conf->rx_adv_conf.dcb_rx_conf;
3897 struct rte_eth_dcb_tx_conf *tx_conf =
3898 &eth_conf->tx_adv_conf.dcb_tx_conf;
3900 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3902 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3906 rx_conf->nb_tcs = num_tcs;
3907 tx_conf->nb_tcs = num_tcs;
3909 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3910 rx_conf->dcb_tc[i] = i % num_tcs;
3911 tx_conf->dcb_tc[i] = i % num_tcs;
3914 eth_conf->rxmode.mq_mode =
3915 (enum rte_eth_rx_mq_mode)
3916 (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
3917 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3918 eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
3922 eth_conf->dcb_capability_en =
3923 RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
3925 eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
3931 init_port_dcb_config(portid_t pid,
3932 enum dcb_mode_enable dcb_mode,
3933 enum rte_eth_nb_tcs num_tcs,
3936 struct rte_eth_conf port_conf;
3937 struct rte_port *rte_port;
3941 if (num_procs > 1) {
3942 printf("The multi-process feature doesn't support dcb.\n");
3945 rte_port = &ports[pid];
3947 /* retain the original device configuration. */
3948 memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
3950 /* set the configuration of DCB in VT mode and DCB in non-VT mode */
3951 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3954 port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3955 /* remove RSS HASH offload for DCB in vt mode */
3956 if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
3957 port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3958 for (i = 0; i < nb_rxq; i++)
3959 rte_port->rx_conf[i].offloads &=
3960 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3963 /* re-configure the device */
3964 retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
3968 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3972 /* If dev_info.vmdq_pool_base is greater than 0,
3973 * the queue ids of the VMDq pools start after the PF queues.
3975 if (dcb_mode == DCB_VT_ENABLED &&
3976 rte_port->dev_info.vmdq_pool_base > 0) {
3978 "VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
3983 /* Assume the ports in testpmd have the same DCB capability
3984 * and the same number of rxq and txq in DCB mode
3986 if (dcb_mode == DCB_VT_ENABLED) {
3987 if (rte_port->dev_info.max_vfs > 0) {
3988 nb_rxq = rte_port->dev_info.nb_rx_queues;
3989 nb_txq = rte_port->dev_info.nb_tx_queues;
3991 nb_rxq = rte_port->dev_info.max_rx_queues;
3992 nb_txq = rte_port->dev_info.max_tx_queues;
3995 /* if VT is disabled, use all PF queues */
3996 if (rte_port->dev_info.vmdq_pool_base == 0) {
3997 nb_rxq = rte_port->dev_info.max_rx_queues;
3998 nb_txq = rte_port->dev_info.max_tx_queues;
4000 nb_rxq = (queueid_t)num_tcs;
4001 nb_txq = (queueid_t)num_tcs;
4005 rx_free_thresh = 64;
4007 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4009 rxtx_port_config(pid);
4011 rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4012 for (i = 0; i < RTE_DIM(vlan_tags); i++)
4013 rx_vft_set(pid, vlan_tags[i], 1);
4015 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4019 rte_port->dcb_flag = 1;
4021 /* Enter DCB configuration status */
4032 /* Configuration of Ethernet ports. */
4033 ports = rte_zmalloc("testpmd: ports",
4034 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4035 RTE_CACHE_LINE_SIZE);
4036 if (ports == NULL) {
4037 rte_exit(EXIT_FAILURE,
4038 "rte_zmalloc(%d struct rte_port) failed\n",
4041 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4042 ports[i].xstats_info.allocated = false;
4043 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4044 LIST_INIT(&ports[i].flow_tunnel_list);
4045 /* Initialize ports NUMA structures */
4046 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4047 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4048 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
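/*
 * Note: port_numa, rxring_numa and txring_numa are uint8_t arrays, so
 * the byte-wise memset() above fills every element with NUMA_NO_CONFIG
 * correctly; this shortcut would break for any wider element type.
 */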
4062 const char clr[] = { 27, '[', '2', 'J', '\0' };
4063 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
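/*
 * These are the ANSI escape sequences ESC[2J (erase the whole display)
 * and ESC[1;1H (move the cursor to row 1, column 1); 27 is ESC.
 */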
4065 /* Clear screen and move to top left */
4066 printf("%s%s", clr, top_left);
4068 printf("\nPort statistics ====================================");
4069 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4070 nic_stats_display(fwd_ports_ids[i]);
4076 signal_handler(int signum)
4078 if (signum == SIGINT || signum == SIGTERM) {
4079 fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4081 #ifdef RTE_LIB_PDUMP
4082 /* uninitialize packet capture framework */
4085 #ifdef RTE_LIB_LATENCYSTATS
4086 if (latencystats_enabled != 0)
4087 rte_latencystats_uninit();
4090 /* Set the flag to indicate forced termination. */
4092 /* exit with the expected status */
4093 #ifndef RTE_EXEC_ENV_WINDOWS
4094 signal(signum, SIG_DFL);
4095 kill(getpid(), signum);
4101 main(int argc, char** argv)
4108 signal(SIGINT, signal_handler);
4109 signal(SIGTERM, signal_handler);
4111 testpmd_logtype = rte_log_register("testpmd");
4112 if (testpmd_logtype < 0)
4113 rte_exit(EXIT_FAILURE, "Cannot register log type");
4114 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4116 diag = rte_eal_init(argc, argv);
4118 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
4119 rte_strerror(rte_errno));
4121 ret = register_eth_event_callback();
4123 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
4125 #ifdef RTE_LIB_PDUMP
4126 /* initialize packet capture framework */
4131 RTE_ETH_FOREACH_DEV(port_id) {
4132 ports_ids[count] = port_id;
4135 nb_ports = (portid_t) count;
4137 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
4139 /* allocate port structures, and init them */
4142 set_def_fwd_config();
4144 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
4145 "Check the core mask argument\n");
4147 /* Bitrate/latency stats disabled by default */
4148 #ifdef RTE_LIB_BITRATESTATS
4149 bitrate_enabled = 0;
4151 #ifdef RTE_LIB_LATENCYSTATS
4152 latencystats_enabled = 0;
4155 /* on FreeBSD, mlockall() is disabled by default */
4156 #ifdef RTE_EXEC_ENV_FREEBSD
4165 launch_args_parse(argc, argv);
4167 #ifndef RTE_EXEC_ENV_WINDOWS
4168 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4169 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
4174 if (tx_first && interactive)
4175 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
4176 "interactive mode.\n");
4178 if (tx_first && lsc_interrupt) {
4180 "Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
4184 if (!nb_rxq && !nb_txq)
4186 "Warning: Either rx or tx queues should be non-zero\n");
4188 if (nb_rxq > 1 && nb_rxq > nb_txq)
4190 "Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
4196 ret = rte_dev_hotplug_handle_enable();
4199 "fail to enable hotplug handling.");
4203 ret = rte_dev_event_monitor_start();
4206 "fail to start device event monitoring.");
4210 ret = rte_dev_event_callback_register(NULL,
4211 dev_event_callback, NULL);
4214 "fail to register device event callback\n");
4219 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
4220 rte_exit(EXIT_FAILURE, "Start ports failed\n");
4222 /* set all ports to promiscuous mode by default */
4223 RTE_ETH_FOREACH_DEV(port_id) {
4224 ret = rte_eth_promiscuous_enable(port_id);
4227 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
4228 port_id, rte_strerror(-ret));
4231 #ifdef RTE_LIB_METRICS
4232 /* Init metrics library */
4233 rte_metrics_init(rte_socket_id());
4236 #ifdef RTE_LIB_LATENCYSTATS
4237 if (latencystats_enabled != 0) {
4238 int ret = rte_latencystats_init(1, NULL);
4241 "Warning: latencystats init() returned error %d\n",
4243 fprintf(stderr, "Latencystats running on lcore %d\n",
4244 latencystats_lcore_id);
4248 /* Setup bitrate stats */
4249 #ifdef RTE_LIB_BITRATESTATS
4250 if (bitrate_enabled != 0) {
4251 bitrate_data = rte_stats_bitrate_create();
4252 if (bitrate_data == NULL)
4253 rte_exit(EXIT_FAILURE,
4254 "Could not allocate bitrate data.\n");
4255 rte_stats_bitrate_reg(bitrate_data);
4258 #ifdef RTE_LIB_CMDLINE
4259 if (strlen(cmdline_filename) != 0)
4260 cmdline_read_from_file(cmdline_filename);
4262 if (interactive == 1) {
4264 printf("Start automatic packet forwarding\n");
4265 start_packet_forwarding(0);
4277 printf("No commandline core given, start packet forwarding\n");
4278 start_packet_forwarding(tx_first);
4279 if (stats_period != 0) {
4280 uint64_t prev_time = 0, cur_time, diff_time = 0;
4281 uint64_t timer_period;
4283 /* Convert to number of cycles */
4284 timer_period = stats_period * rte_get_timer_hz();
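/*
 * Worked example: with --stats-period=5 and a 2.5 GHz timer,
 * timer_period = 5 * 2.5e9 = 12.5e9 cycles, so the statistics below
 * are redrawn every 5 seconds of wall-clock time.
 */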
4286 while (f_quit == 0) {
4287 cur_time = rte_get_timer_cycles();
4288 diff_time += cur_time - prev_time;
4290 if (diff_time >= timer_period) {
4292 /* Reset the timer */
4295 /* Sleep to avoid unnecessary checks */
4296 prev_time = cur_time;
4297 rte_delay_us_sleep(US_PER_S);
4301 printf("Press enter to exit\n");
4302 rc = read(0, &c, 1);
4308 ret = rte_eal_cleanup();
4310 rte_exit(EXIT_FAILURE,
4311 "EAL cleanup failed: %s\n", strerror(-ret));
4313 return EXIT_SUCCESS;