1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
12 #ifndef RTE_EXEC_ENV_WINDOWS
15 #include <sys/types.h>
19 #include <sys/queue.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_byteorder.h>
30 #include <rte_debug.h>
31 #include <rte_cycles.h>
32 #include <rte_memory.h>
33 #include <rte_memcpy.h>
34 #include <rte_launch.h>
36 #include <rte_alarm.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
51 #include <rte_pmd_ixgbe.h>
54 #include <rte_pdump.h>
57 #ifdef RTE_LIB_METRICS
58 #include <rte_metrics.h>
60 #ifdef RTE_LIB_BITRATESTATS
61 #include <rte_bitrate.h>
63 #ifdef RTE_LIB_LATENCYSTATS
64 #include <rte_latencystats.h>
66 #ifdef RTE_EXEC_ENV_WINDOWS
73 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
74 #define HUGE_FLAG (0x40000)
76 #define HUGE_FLAG MAP_HUGETLB
79 #ifndef MAP_HUGE_SHIFT
80 /* older kernels (or FreeBSD) will not have this define */
81 #define HUGE_SHIFT (26)
83 #define HUGE_SHIFT MAP_HUGE_SHIFT
86 #define EXTMEM_HEAP_NAME "extmem"
87 #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
89 uint16_t verbose_level = 0; /**< Silent by default. */
90 int testpmd_logtype; /**< Log type for testpmd logs */
92 /* use main core for command line? */
93 uint8_t interactive = 0;
94 uint8_t auto_start = 0;
96 char cmdline_filename[PATH_MAX] = {0};
99 * NUMA support configuration.
100 * When set, the NUMA support attempts to dispatch the allocation of the
101 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
102 * probed ports among the CPU sockets 0 and 1.
103 * Otherwise, all memory is allocated from CPU socket 0.
105 uint8_t numa_support = 1; /**< numa enabled by default */
108 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
111 uint8_t socket_num = UMA_NO_CONFIG;
114 * Select mempool allocation type:
115 * - native: use regular DPDK memory
116 * - anon: use regular DPDK memory to create mempool, but populate using
117 * anonymous memory (may not be IOVA-contiguous)
118 * - xmem: use externally allocated hugepage memory
120 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
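/*
 * Illustrative usage (not from this file): the allocation type is normally
 * selected on the testpmd command line via the --mp-alloc option, e.g.
 *
 *   dpdk-testpmd -l 0-3 -n 4 -- -i --mp-alloc=anon
 *
 * which sets mp_alloc_type to MP_ALLOC_ANON before the mbuf pools are created.
 */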
123 * Store the specified sockets on which the memory pool used by each port is allocated.
126 uint8_t port_numa[RTE_MAX_ETHPORTS];
129 * Store the specified sockets on which the RX rings used by each port are allocated.
132 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
135 * Store the specified sockets on which the TX rings used by each port are allocated.
138 uint8_t txring_numa[RTE_MAX_ETHPORTS];
141 * Record the Ethernet address of peer target ports to which packets are forwarded.
143 * Must be instantiated with the Ethernet addresses of peer traffic generator ports.
146 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
147 portid_t nb_peer_eth_addrs = 0;
150 * Probed Target Environment.
152 struct rte_port *ports; /**< For all probed ethernet ports. */
153 portid_t nb_ports; /**< Number of probed ethernet ports. */
154 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
155 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
157 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
160 * Test Forwarding Configuration.
161 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
162 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
164 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
165 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
166 portid_t nb_cfg_ports; /**< Number of configured ports. */
167 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
169 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
170 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
172 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
173 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
176 * Forwarding engines.
178 struct fwd_engine * fwd_engines[] = {
188 &five_tuple_swap_fwd_engine,
189 #ifdef RTE_LIBRTE_IEEE1588
190 &ieee1588_fwd_engine,
196 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
197 uint16_t mempool_flags;
199 struct fwd_config cur_fwd_config;
200 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
201 uint32_t retry_enabled;
202 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
203 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
205 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
206 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
207 DEFAULT_MBUF_DATA_SIZE
208 }; /**< Mbuf data space size. */
209 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
210 * specified on command-line. */
211 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
213 /** Extended statistics to show. */
214 struct rte_eth_xstat_name *xstats_display;
216 unsigned int xstats_display_num; /**< Size of extended statistics to show */
219 * In a container, the process running with the 'stats-period' option cannot be
220 * terminated. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
225 * Max Rx frame size, set by '--max-pkt-len' parameter.
227 uint32_t max_rx_pkt_len;
230 * Configuration of packet segments used to scatter received packets
231 * if any of the split features is configured.
233 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
234 uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
235 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
236 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
239 * Configuration of packet segments used by the "txonly" processing engine.
241 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
242 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
243 TXONLY_DEF_PACKET_LEN,
245 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
247 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
248 /**< Split policy for packets to TX. */
250 uint8_t txonly_multi_flow;
251 /**< Whether multiple flows are generated in TXONLY mode. */
253 uint32_t tx_pkt_times_inter;
254 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
256 uint32_t tx_pkt_times_intra;
257 /**< Timings for send scheduling in TXONLY mode, time between packets. */
259 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
260 uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
261 int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
262 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
264 /* whether the current configuration is in DCB mode; 0 means it is not */
265 uint8_t dcb_config = 0;
268 * Configurable number of RX/TX queues.
270 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
271 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
272 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
275 * Configurable number of RX/TX ring descriptors.
276 * Defaults are supplied by drivers via ethdev.
278 #define RTE_TEST_RX_DESC_DEFAULT 0
279 #define RTE_TEST_TX_DESC_DEFAULT 0
280 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
281 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
283 #define RTE_PMD_PARAM_UNSET -1
285 * Configurable values of RX and TX ring threshold registers.
288 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
289 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
290 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
292 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
293 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
294 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
297 * Configurable value of RX free threshold.
299 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
302 * Configurable value of RX drop enable.
304 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
307 * Configurable value of TX free threshold.
309 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
312 * Configurable value of TX RS bit threshold.
314 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
317 * Configurable value of buffered packets before sending.
319 uint16_t noisy_tx_sw_bufsz;
322 * Configurable value of packet buffer timeout.
324 uint16_t noisy_tx_sw_buf_flush_time;
327 * Configurable value for size of VNF internal memory area
328 * used for simulating noisy neighbour behaviour
330 uint64_t noisy_lkup_mem_sz;
333 * Configurable value of number of random writes done in
334 * VNF simulation memory area.
336 uint64_t noisy_lkup_num_writes;
339 * Configurable value of number of random reads done in
340 * VNF simulation memory area.
342 uint64_t noisy_lkup_num_reads;
345 * Configurable value of number of random reads/writes done in
346 * VNF simulation memory area.
348 uint64_t noisy_lkup_num_reads_writes;
351 * Receive Side Scaling (RSS) configuration.
353 uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
356 * Port topology configuration
358 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
361 * Avoid flushing all the RX streams before starting forwarding.
363 uint8_t no_flush_rx = 0; /* flush by default */
366 * Flow API isolated mode.
368 uint8_t flow_isolate_all;
371 * Avoid checking the link status when starting/stopping a port.
373 uint8_t no_link_check = 0; /* check by default */
376 * Don't automatically start all ports in interactive mode.
378 uint8_t no_device_start = 0;
381 * Enable link status change notification
383 uint8_t lsc_interrupt = 1; /* enabled by default */
386 * Enable device removal notification.
388 uint8_t rmv_interrupt = 1; /* enabled by default */
390 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
392 /* After attach, port setup is called on event or by iterator */
393 bool setup_on_probe_event = true;
395 /* Clear ptypes on port initialization. */
396 uint8_t clear_ptypes = true;
398 /* Hairpin ports configuration mode. */
399 uint16_t hairpin_mode;
401 /* Pretty printing of ethdev events */
402 static const char * const eth_event_desc[] = {
403 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
404 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
405 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
406 [RTE_ETH_EVENT_INTR_RESET] = "reset",
407 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
408 [RTE_ETH_EVENT_IPSEC] = "IPsec",
409 [RTE_ETH_EVENT_MACSEC] = "MACsec",
410 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
411 [RTE_ETH_EVENT_NEW] = "device probed",
412 [RTE_ETH_EVENT_DESTROY] = "device released",
413 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
414 [RTE_ETH_EVENT_MAX] = NULL,
418 * Display or mask ether events
419 * Default to all events except VF_MBOX
421 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
422 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
423 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
424 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
425 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
426 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
427 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
428 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
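/*
 * Sketch (hypothetical helper, not part of testpmd): with this mask layout,
 * deciding whether a given event type should be printed is a single bit test.
 */
static inline bool
eth_event_is_printed(enum rte_eth_event_type type)
{
	return (event_print_mask & (UINT32_C(1) << type)) != 0;
}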
430 * Decide if all memory is locked for performance.
435 * NIC bypass mode configuration options.
438 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
439 /* The NIC bypass watchdog timeout. */
440 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
444 #ifdef RTE_LIB_LATENCYSTATS
447 * Set when latency stats are enabled on the command line
449 uint8_t latencystats_enabled;
452 * Lcore ID to serve latency statistics.
454 lcoreid_t latencystats_lcore_id = -1;
459 * Ethernet device configuration.
461 struct rte_eth_rxmode rx_mode;
463 struct rte_eth_txmode tx_mode = {
464 .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
467 struct rte_eth_fdir_conf fdir_conf = {
468 .mode = RTE_FDIR_MODE_NONE,
469 .pballoc = RTE_ETH_FDIR_PBALLOC_64K,
470 .status = RTE_FDIR_REPORT_STATUS,
472 .vlan_tci_mask = 0xFFEF,
474 .src_ip = 0xFFFFFFFF,
475 .dst_ip = 0xFFFFFFFF,
478 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
479 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
481 .src_port_mask = 0xFFFF,
482 .dst_port_mask = 0xFFFF,
483 .mac_addr_byte_mask = 0xFF,
484 .tunnel_type_mask = 1,
485 .tunnel_id_mask = 0xFFFFFFFF,
490 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
493 * Display zero values by default for xstats
495 uint8_t xstats_hide_zero;
498 * Measure of CPU cycles disabled by default
500 uint8_t record_core_cycles;
503 * Display of RX and TX bursts disabled by default
505 uint8_t record_burst_stats;
508 * Number of ports per shared Rx queue group; 0 to disable.
512 unsigned int num_sockets = 0;
513 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
515 #ifdef RTE_LIB_BITRATESTATS
516 /* Bitrate statistics */
517 struct rte_stats_bitrates *bitrate_data;
518 lcoreid_t bitrate_lcore_id;
519 uint8_t bitrate_enabled;
523 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
524 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
528 * Hexadecimal bitmask of RX mq modes that can be enabled.
530 enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
533 * Used to set forced link speed
535 uint32_t eth_link_speed;
538 * ID of the current process in multi-process, used to
539 * configure the queues to be polled.
544 * Number of processes in multi-process, used to
545 * configure the queues to be polled.
547 unsigned int num_procs = 1;
550 eth_rx_metadata_negotiate_mp(uint16_t port_id)
552 uint64_t rx_meta_features = 0;
555 if (!is_proc_primary())
558 rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
559 rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
560 rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
562 ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
564 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
565 TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
569 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
570 TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
574 if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
575 TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
578 } else if (ret != -ENOTSUP) {
579 rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
580 port_id, rte_strerror(-ret));
585 flow_pick_transfer_proxy_mp(uint16_t port_id)
587 struct rte_port *port = &ports[port_id];
590 port->flow_transfer_proxy = port_id;
592 if (!is_proc_primary())
595 ret = rte_flow_pick_transfer_proxy(port_id, &port->flow_transfer_proxy,
598 fprintf(stderr, "Error picking flow transfer proxy for port %u: %s - ignore\n",
599 port_id, rte_strerror(-ret));
604 eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
605 const struct rte_eth_conf *dev_conf)
607 if (is_proc_primary())
608 return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
614 eth_dev_start_mp(uint16_t port_id)
616 if (is_proc_primary())
617 return rte_eth_dev_start(port_id);
623 eth_dev_stop_mp(uint16_t port_id)
625 if (is_proc_primary())
626 return rte_eth_dev_stop(port_id);
632 mempool_free_mp(struct rte_mempool *mp)
634 if (is_proc_primary())
635 rte_mempool_free(mp);
639 eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
641 if (is_proc_primary())
642 return rte_eth_dev_set_mtu(port_id, mtu);
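/*
 * The *_mp wrappers above all follow the same multi-process convention:
 * only the primary process performs the real ethdev/mempool call, while
 * secondary processes skip it (reporting success where a return value is
 * expected), since the primary process owns device and mempool configuration.
 */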
647 /* Forward function declarations */
648 static void setup_attached_port(portid_t pi);
649 static void check_all_ports_link_status(uint32_t port_mask);
650 static int eth_event_callback(portid_t port_id,
651 enum rte_eth_event_type type,
652 void *param, void *ret_param);
653 static void dev_event_callback(const char *device_name,
654 enum rte_dev_event_type type,
656 static void fill_xstats_display_info(void);
659 * Check if all the ports are started.
660 * If yes, return positive value. If not, return zero.
662 static int all_ports_started(void);
665 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
666 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
669 /* Holds the registered mbuf dynamic flags names. */
670 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
674 * Helper function to check if a socket is new (not yet discovered).
675 * If so, return a positive value; otherwise, return zero.
678 new_socket_id(unsigned int socket_id)
682 for (i = 0; i < num_sockets; i++) {
683 if (socket_ids[i] == socket_id)
690 * Setup default configuration.
693 set_default_fwd_lcores_config(void)
697 unsigned int sock_num;
700 for (i = 0; i < RTE_MAX_LCORE; i++) {
701 if (!rte_lcore_is_enabled(i))
703 sock_num = rte_lcore_to_socket_id(i);
704 if (new_socket_id(sock_num)) {
705 if (num_sockets >= RTE_MAX_NUMA_NODES) {
706 rte_exit(EXIT_FAILURE,
707 "Total sockets greater than %u\n",
710 socket_ids[num_sockets++] = sock_num;
712 if (i == rte_get_main_lcore())
714 fwd_lcores_cpuids[nb_lc++] = i;
716 nb_lcores = (lcoreid_t) nb_lc;
717 nb_cfg_lcores = nb_lcores;
722 set_def_peer_eth_addrs(void)
726 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
727 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
728 peer_eth_addrs[i].addr_bytes[5] = i;
733 set_default_fwd_ports_config(void)
738 RTE_ETH_FOREACH_DEV(pt_id) {
739 fwd_ports_ids[i++] = pt_id;
741 /* Update sockets info according to the attached device */
742 int socket_id = rte_eth_dev_socket_id(pt_id);
743 if (socket_id >= 0 && new_socket_id(socket_id)) {
744 if (num_sockets >= RTE_MAX_NUMA_NODES) {
745 rte_exit(EXIT_FAILURE,
746 "Total sockets greater than %u\n",
749 socket_ids[num_sockets++] = socket_id;
753 nb_cfg_ports = nb_ports;
754 nb_fwd_ports = nb_ports;
758 set_def_fwd_config(void)
760 set_default_fwd_lcores_config();
761 set_def_peer_eth_addrs();
762 set_default_fwd_ports_config();
765 #ifndef RTE_EXEC_ENV_WINDOWS
766 /* extremely pessimistic estimation of memory required to create a mempool */
768 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
770 unsigned int n_pages, mbuf_per_pg, leftover;
771 uint64_t total_mem, mbuf_mem, obj_sz;
773 /* there is no good way to predict how much space the mempool will
774 * occupy because it will allocate chunks on the fly, and some of those
775 * will come from default DPDK memory while some will come from our
776 * external memory, so just assume 128MB will be enough for everyone.
778 uint64_t hdr_mem = 128 << 20;
780 /* account for possible non-contiguousness */
781 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
783 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
787 mbuf_per_pg = pgsz / obj_sz;
788 leftover = (nb_mbufs % mbuf_per_pg) > 0;
789 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
791 mbuf_mem = n_pages * pgsz;
793 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
795 if (total_mem > SIZE_MAX) {
796 TESTPMD_LOG(ERR, "Memory size too big\n");
799 *out = (size_t)total_mem;
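/*
 * Worked example (assuming rte_mempool_calc_obj_size() returns 2560 bytes):
 * with nb_mbufs = 131072 and 2 MB pages, one page holds 2097152 / 2560 = 819
 * mbufs, so 160 full pages plus one leftover page = 161 pages, i.e. 322 MB
 * of mbuf memory; adding the 128 MB header estimate and aligning to the
 * page size yields a 450 MB reservation.
 */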
805 pagesz_flags(uint64_t page_sz)
807 /* as per the mmap() manpage, a huge page size is encoded as its log2
808 * shifted by MAP_HUGE_SHIFT
810 int log2 = rte_log2_u64(page_sz);
812 return (log2 << HUGE_SHIFT);
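/*
 * For example, 2 MB pages give log2 = 21 and a result of 21 << 26, which
 * matches Linux's MAP_HUGE_2MB; 1 GB pages give 30 << 26 (MAP_HUGE_1GB).
 */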
816 alloc_mem(size_t memsz, size_t pgsz, bool huge)
821 /* allocate anonymous hugepages */
822 flags = MAP_ANONYMOUS | MAP_PRIVATE;
824 flags |= HUGE_FLAG | pagesz_flags(pgsz);
826 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
827 if (addr == MAP_FAILED)
833 struct extmem_param {
837 rte_iova_t *iova_table;
838 unsigned int iova_table_len;
842 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
845 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
846 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
847 unsigned int cur_page, n_pages, pgsz_idx;
848 size_t mem_sz, cur_pgsz;
849 rte_iova_t *iovas = NULL;
853 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
854 /* skip anything that is too big */
855 if (pgsizes[pgsz_idx] > SIZE_MAX)
858 cur_pgsz = pgsizes[pgsz_idx];
860 /* if we were told not to allocate hugepages, override */
862 cur_pgsz = sysconf(_SC_PAGESIZE);
864 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
866 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
870 /* allocate our memory */
871 addr = alloc_mem(mem_sz, cur_pgsz, huge);
873 /* if we couldn't allocate memory with a specified page size,
874 * that doesn't mean we can't do it with other page sizes, so
880 /* store IOVA addresses for every page in this memory area */
881 n_pages = mem_sz / cur_pgsz;
883 iovas = malloc(sizeof(*iovas) * n_pages);
886 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
889 /* lock memory if it's not huge pages */
893 /* populate IOVA addresses */
894 for (cur_page = 0; cur_page < n_pages; cur_page++) {
899 offset = cur_pgsz * cur_page;
900 cur = RTE_PTR_ADD(addr, offset);
902 /* touch the page before getting its IOVA */
903 *(volatile char *)cur = 0;
905 iova = rte_mem_virt2iova(cur);
907 iovas[cur_page] = iova;
912 /* if we couldn't allocate anything */
918 param->pgsz = cur_pgsz;
919 param->iova_table = iovas;
920 param->iova_table_len = n_pages;
927 munmap(addr, mem_sz);
933 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
935 struct extmem_param param;
938 memset(&param, 0, sizeof(param));
940 /* check if our heap exists */
941 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
943 /* create our heap */
944 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
946 TESTPMD_LOG(ERR, "Cannot create heap\n");
951 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
953 TESTPMD_LOG(ERR, "Cannot create memory area\n");
957 /* we now have a valid memory area, so add it to heap */
958 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
959 param.addr, param.len, param.iova_table,
960 param.iova_table_len, param.pgsz);
962 /* when using VFIO, memory is automatically mapped for DMA by EAL */
964 /* not needed any more */
965 free(param.iova_table);
968 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
969 munmap(param.addr, param.len);
975 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
981 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
982 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
987 RTE_ETH_FOREACH_DEV(pid) {
988 struct rte_eth_dev_info dev_info;
990 ret = eth_dev_info_get_print_err(pid, &dev_info);
993 "unable to get device info for port %d on addr 0x%p,"
994 "mempool unmapping will not be performed\n",
999 ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
1002 "unable to DMA unmap addr 0x%p "
1004 memhdr->addr, dev_info.device->name);
1007 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
1010 "unable to un-register addr 0x%p\n", memhdr->addr);
1015 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
1016 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
1019 size_t page_size = sysconf(_SC_PAGESIZE);
1022 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
1026 "unable to register addr 0x%p\n", memhdr->addr);
1029 RTE_ETH_FOREACH_DEV(pid) {
1030 struct rte_eth_dev_info dev_info;
1032 ret = eth_dev_info_get_print_err(pid, &dev_info);
1035 "unable to get device info for port %d on addr 0x%p,"
1036 "mempool mapping will not be performed\n",
1040 ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
1043 "unable to DMA map addr 0x%p "
1045 memhdr->addr, dev_info.device->name);
1052 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
1053 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
1055 struct rte_pktmbuf_extmem *xmem;
1056 unsigned int ext_num, zone_num, elt_num;
1059 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
1060 elt_num = EXTBUF_ZONE_SIZE / elt_size;
1061 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
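/*
 * For example (assuming mbuf_sz = 2176, already cache-line aligned): each
 * 2 MB zone holds 2097152 / 2176 = 963 buffers, so 131072 mbufs require
 * (131072 + 962) / 963 = 137 memzones.
 */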
1063 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
1065 TESTPMD_LOG(ERR, "Cannot allocate memory for "
1066 "external buffer descriptors\n");
1070 for (ext_num = 0; ext_num < zone_num; ext_num++) {
1071 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
1072 const struct rte_memzone *mz;
1073 char mz_name[RTE_MEMZONE_NAMESIZE];
1076 ret = snprintf(mz_name, sizeof(mz_name),
1077 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
1078 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
1079 errno = ENAMETOOLONG;
1083 mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
1085 RTE_MEMZONE_IOVA_CONTIG |
1087 RTE_MEMZONE_SIZE_HINT_ONLY,
1091 * The caller exits on external buffer creation
1092 * error, so there is no need to free memzones.
1098 xseg->buf_ptr = mz->addr;
1099 xseg->buf_iova = mz->iova;
1100 xseg->buf_len = EXTBUF_ZONE_SIZE;
1101 xseg->elt_size = elt_size;
1103 if (ext_num == 0 && xmem != NULL) {
1112 * Configuration initialisation done once at init time.
1114 static struct rte_mempool *
1115 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
1116 unsigned int socket_id, uint16_t size_idx)
1118 char pool_name[RTE_MEMPOOL_NAMESIZE];
1119 struct rte_mempool *rte_mp = NULL;
1120 #ifndef RTE_EXEC_ENV_WINDOWS
1123 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1125 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1126 if (!is_proc_primary()) {
1127 rte_mp = rte_mempool_lookup(pool_name);
1129 rte_exit(EXIT_FAILURE,
1130 "Get mbuf pool for socket %u failed: %s\n",
1131 socket_id, rte_strerror(rte_errno));
1136 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1137 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1139 switch (mp_alloc_type) {
1140 case MP_ALLOC_NATIVE:
1142 /* wrapper to rte_mempool_create() */
1143 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1144 rte_mbuf_best_mempool_ops());
1145 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1146 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1149 #ifndef RTE_EXEC_ENV_WINDOWS
1152 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1153 mb_size, (unsigned int) mb_mempool_cache,
1154 sizeof(struct rte_pktmbuf_pool_private),
1155 socket_id, mempool_flags);
1159 if (rte_mempool_populate_anon(rte_mp) == 0) {
1160 rte_mempool_free(rte_mp);
1164 rte_pktmbuf_pool_init(rte_mp, NULL);
1165 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1166 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1170 case MP_ALLOC_XMEM_HUGE:
1173 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1175 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1176 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1179 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1180 if (heap_socket < 0)
1181 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1183 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1184 rte_mbuf_best_mempool_ops());
1185 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1186 mb_mempool_cache, 0, mbuf_seg_size,
1193 struct rte_pktmbuf_extmem *ext_mem;
1194 unsigned int ext_num;
1196 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1197 socket_id, pool_name, &ext_mem);
1199 rte_exit(EXIT_FAILURE,
1200 "Can't create pinned data buffers\n");
1202 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1203 rte_mbuf_best_mempool_ops());
1204 rte_mp = rte_pktmbuf_pool_create_extbuf
1205 (pool_name, nb_mbuf, mb_mempool_cache,
1206 0, mbuf_seg_size, socket_id,
1213 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1217 #ifndef RTE_EXEC_ENV_WINDOWS
1220 if (rte_mp == NULL) {
1221 rte_exit(EXIT_FAILURE,
1222 "Creation of mbuf pool for socket %u failed: %s\n",
1223 socket_id, rte_strerror(rte_errno));
1224 } else if (verbose_level > 0) {
1225 rte_mempool_dump(stdout, rte_mp);
1231 * Check whether the given socket ID is valid in NUMA mode;
1232 * return 0 if valid, -1 otherwise.
1235 check_socket_id(const unsigned int socket_id)
1237 static int warning_once = 0;
1239 if (new_socket_id(socket_id)) {
1240 if (!warning_once && numa_support)
1242 "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
1250 * Get the allowed maximum number of RX queues.
1251 * *pid returns the port ID which has the minimal value of
1252 * max_rx_queues among all ports.
1255 get_allowed_max_nb_rxq(portid_t *pid)
1257 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1258 bool max_rxq_valid = false;
1260 struct rte_eth_dev_info dev_info;
1262 RTE_ETH_FOREACH_DEV(pi) {
1263 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1266 max_rxq_valid = true;
1267 if (dev_info.max_rx_queues < allowed_max_rxq) {
1268 allowed_max_rxq = dev_info.max_rx_queues;
1272 return max_rxq_valid ? allowed_max_rxq : 0;
1276 * Check whether the input rxq is valid.
1277 * The input rxq is valid if it is not greater than the maximum number
1278 * of RX queues of any port.
1279 * Return 0 if valid, -1 otherwise.
1282 check_nb_rxq(queueid_t rxq)
1284 queueid_t allowed_max_rxq;
1287 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1288 if (rxq > allowed_max_rxq) {
1290 "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
1291 rxq, allowed_max_rxq, pid);
1298 * Get the allowed maximum number of TX queues.
1299 * *pid returns the port ID which has the minimal value of
1300 * max_tx_queues among all ports.
1303 get_allowed_max_nb_txq(portid_t *pid)
1305 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1306 bool max_txq_valid = false;
1308 struct rte_eth_dev_info dev_info;
1310 RTE_ETH_FOREACH_DEV(pi) {
1311 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1314 max_txq_valid = true;
1315 if (dev_info.max_tx_queues < allowed_max_txq) {
1316 allowed_max_txq = dev_info.max_tx_queues;
1320 return max_txq_valid ? allowed_max_txq : 0;
1324 * Check whether the input txq is valid.
1325 * The input txq is valid if it is not greater than the maximum number
1326 * of TX queues of any port.
1327 * Return 0 if valid, -1 otherwise.
1330 check_nb_txq(queueid_t txq)
1332 queueid_t allowed_max_txq;
1335 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1336 if (txq > allowed_max_txq) {
1338 "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
1339 txq, allowed_max_txq, pid);
1346 * Get the allowed maximum number of RXDs of every Rx queue.
1347 * *pid returns the port ID which has the minimal value of
1348 * max_rxd among all queues of all ports.
1351 get_allowed_max_nb_rxd(portid_t *pid)
1353 uint16_t allowed_max_rxd = UINT16_MAX;
1355 struct rte_eth_dev_info dev_info;
1357 RTE_ETH_FOREACH_DEV(pi) {
1358 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1361 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1362 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1366 return allowed_max_rxd;
1370 * Get the allowed minimal number of RXDs of every Rx queue.
1371 * *pid returns the port ID which has the largest value of
1372 * min_rxd among all queues of all ports.
1375 get_allowed_min_nb_rxd(portid_t *pid)
1377 uint16_t allowed_min_rxd = 0;
1379 struct rte_eth_dev_info dev_info;
1381 RTE_ETH_FOREACH_DEV(pi) {
1382 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1385 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1386 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1391 return allowed_min_rxd;
1395 * Check whether the input rxd is valid.
1396 * The input rxd is valid if it is not greater than the maximum number
1397 * of RXDs of any Rx queue and not less than the minimal number
1398 * of RXDs of any Rx queue.
1399 * Return 0 if valid, -1 otherwise.
1402 check_nb_rxd(queueid_t rxd)
1404 uint16_t allowed_max_rxd;
1405 uint16_t allowed_min_rxd;
1408 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1409 if (rxd > allowed_max_rxd) {
1411 "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
1412 rxd, allowed_max_rxd, pid);
1416 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1417 if (rxd < allowed_min_rxd) {
1419 "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
1420 rxd, allowed_min_rxd, pid);
1428 * Get the allowed maximum number of TXDs of every Tx queue.
1429 * *pid returns the port ID which has the minimal value of
1430 * max_txd among all queues of all ports.
1433 get_allowed_max_nb_txd(portid_t *pid)
1435 uint16_t allowed_max_txd = UINT16_MAX;
1437 struct rte_eth_dev_info dev_info;
1439 RTE_ETH_FOREACH_DEV(pi) {
1440 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1443 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1444 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1448 return allowed_max_txd;
1452 * Get the allowed minimal number of TXDs of every Tx queue.
1453 * *pid returns the port ID which has the largest value of
1454 * min_txd among all queues of all ports.
1457 get_allowed_min_nb_txd(portid_t *pid)
1459 uint16_t allowed_min_txd = 0;
1461 struct rte_eth_dev_info dev_info;
1463 RTE_ETH_FOREACH_DEV(pi) {
1464 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1467 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1468 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1473 return allowed_min_txd;
1477 * Check whether the input txd is valid.
1478 * The input txd is valid if it is not greater than the maximum number
1479 * of TXDs of any Tx queue and not less than the minimal number.
1480 * Return 0 if valid, -1 otherwise.
1483 check_nb_txd(queueid_t txd)
1485 uint16_t allowed_max_txd;
1486 uint16_t allowed_min_txd;
1489 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1490 if (txd > allowed_max_txd) {
1492 "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
1493 txd, allowed_max_txd, pid);
1497 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1498 if (txd < allowed_min_txd) {
1500 "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
1501 txd, allowed_min_txd, pid);
1509 * Get the allowed maximum number of hairpin queues.
1510 * *pid returns the port ID which has the minimal value of
1511 * max_hairpin_queues among all ports.
1514 get_allowed_max_nb_hairpinq(portid_t *pid)
1516 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1518 struct rte_eth_hairpin_cap cap;
1520 RTE_ETH_FOREACH_DEV(pi) {
1521 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1525 if (cap.max_nb_queues < allowed_max_hairpinq) {
1526 allowed_max_hairpinq = cap.max_nb_queues;
1530 return allowed_max_hairpinq;
1534 * Check whether the input hairpinq is valid.
1535 * The input hairpinq is valid if it is not greater than the maximum number
1536 * of hairpin queues of any port.
1537 * Return 0 if valid, -1 otherwise.
1540 check_nb_hairpinq(queueid_t hairpinq)
1542 queueid_t allowed_max_hairpinq;
1545 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1546 if (hairpinq > allowed_max_hairpinq) {
1548 "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
1549 hairpinq, allowed_max_hairpinq, pid);
1556 get_eth_overhead(struct rte_eth_dev_info *dev_info)
1558 uint32_t eth_overhead;
1560 if (dev_info->max_mtu != UINT16_MAX &&
1561 dev_info->max_rx_pktlen > dev_info->max_mtu)
1562 eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
1564 eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1566 return eth_overhead;
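/*
 * For example, a device reporting max_rx_pktlen = 9618 and max_mtu = 9600
 * yields an overhead of 18 bytes; a device that does not report its MTU
 * falls back to RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes.
 */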
1570 init_config_port_offloads(portid_t pid, uint32_t socket_id)
1572 struct rte_port *port = &ports[pid];
1576 eth_rx_metadata_negotiate_mp(pid);
1577 flow_pick_transfer_proxy_mp(pid);
1579 port->dev_conf.txmode = tx_mode;
1580 port->dev_conf.rxmode = rx_mode;
1582 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1584 rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1586 if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
1587 port->dev_conf.txmode.offloads &=
1588 ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1590 /* Apply Rx offloads configuration */
1591 for (i = 0; i < port->dev_info.max_rx_queues; i++)
1592 port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
1593 /* Apply Tx offloads configuration */
1594 for (i = 0; i < port->dev_info.max_tx_queues; i++)
1595 port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
1598 port->dev_conf.link_speeds = eth_link_speed;
1601 port->dev_conf.rxmode.mtu = max_rx_pkt_len -
1602 get_eth_overhead(&port->dev_info);
1604 /* set flag to initialize port/queue */
1605 port->need_reconfig = 1;
1606 port->need_reconfig_queues = 1;
1607 port->socket_id = socket_id;
1608 port->tx_metadata = 0;
1611 * Check the maximum number of segments per MTU
1612 * and update the mbuf data size accordingly.
1614 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1615 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1616 uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
1619 if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
1620 uint16_t data_size = (mtu + eth_overhead) /
1621 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1622 uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
1624 if (buffer_size > mbuf_data_size[0]) {
1625 mbuf_data_size[0] = buffer_size;
1626 TESTPMD_LOG(WARNING,
1627 "Configured mbuf size of the first segment %hu\n",
1638 struct rte_mempool *mbp;
1639 unsigned int nb_mbuf_per_pool;
1642 struct rte_gro_param gro_param;
1648 /* Configuration of logical cores. */
1649 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1650 sizeof(struct fwd_lcore *) * nb_lcores,
1651 RTE_CACHE_LINE_SIZE);
1652 if (fwd_lcores == NULL) {
1653 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1654 "failed\n", nb_lcores);
1656 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1657 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1658 sizeof(struct fwd_lcore),
1659 RTE_CACHE_LINE_SIZE);
1660 if (fwd_lcores[lc_id] == NULL) {
1661 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1664 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1667 RTE_ETH_FOREACH_DEV(pid) {
1671 socket_id = port_numa[pid];
1672 if (port_numa[pid] == NUMA_NO_CONFIG) {
1673 socket_id = rte_eth_dev_socket_id(pid);
1676 * if socket_id is invalid,
1677 * set to the first available socket.
1679 if (check_socket_id(socket_id) < 0)
1680 socket_id = socket_ids[0];
1683 socket_id = (socket_num == UMA_NO_CONFIG) ?
1686 /* Apply default TxRx configuration for all ports */
1687 init_config_port_offloads(pid, socket_id);
1690 * Create pools of mbuf.
1691 * If NUMA support is disabled, create a single pool of mbuf in
1692 * socket 0 memory by default.
1693 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1695 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1696 * nb_txd can be configured at run time.
1698 if (param_total_num_mbufs)
1699 nb_mbuf_per_pool = param_total_num_mbufs;
1701 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1702 (nb_lcores * mb_mempool_cache) +
1703 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1704 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1710 for (i = 0; i < num_sockets; i++)
1711 for (j = 0; j < mbuf_data_size_n; j++)
1712 mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1713 mbuf_pool_create(mbuf_data_size[j],
1719 for (i = 0; i < mbuf_data_size_n; i++)
1720 mempools[i] = mbuf_pool_create
1723 socket_num == UMA_NO_CONFIG ?
1730 gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1731 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
1734 * Record which mbuf pool each logical core should use, if needed.
1736 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1737 mbp = mbuf_pool_find(
1738 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1741 mbp = mbuf_pool_find(0, 0);
1742 fwd_lcores[lc_id]->mbp = mbp;
1744 /* initialize GSO context */
1745 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1746 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1747 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1748 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1750 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1757 /* create a gro context for each lcore */
1758 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1759 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1760 gro_param.max_item_per_flow = MAX_PKT_BURST;
1761 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1762 gro_param.socket_id = rte_lcore_to_socket_id(
1763 fwd_lcores_cpuids[lc_id]);
1764 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1765 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1766 rte_exit(EXIT_FAILURE,
1767 "rte_gro_ctx_create() failed\n");
1775 reconfig(portid_t new_port_id, unsigned socket_id)
1777 /* Reconfiguration of Ethernet ports. */
1778 init_config_port_offloads(new_port_id, socket_id);
1784 init_fwd_streams(void)
1787 struct rte_port *port;
1788 streamid_t sm_id, nb_fwd_streams_new;
1791 /* set socket id according to numa or not */
1792 RTE_ETH_FOREACH_DEV(pid) {
1794 if (nb_rxq > port->dev_info.max_rx_queues) {
1796 "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
1797 nb_rxq, port->dev_info.max_rx_queues);
1800 if (nb_txq > port->dev_info.max_tx_queues) {
1802 "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
1803 nb_txq, port->dev_info.max_tx_queues);
1807 if (port_numa[pid] != NUMA_NO_CONFIG)
1808 port->socket_id = port_numa[pid];
1810 port->socket_id = rte_eth_dev_socket_id(pid);
1813 * if socket_id is invalid,
1814 * set to the first available socket.
1816 if (check_socket_id(port->socket_id) < 0)
1817 port->socket_id = socket_ids[0];
1821 if (socket_num == UMA_NO_CONFIG)
1822 port->socket_id = 0;
1824 port->socket_id = socket_num;
1828 q = RTE_MAX(nb_rxq, nb_txq);
1831 "Fail: Cannot allocate fwd streams as number of queues is 0\n");
1834 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1835 if (nb_fwd_streams_new == nb_fwd_streams)
1838 if (fwd_streams != NULL) {
1839 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1840 if (fwd_streams[sm_id] == NULL)
1842 rte_free(fwd_streams[sm_id]);
1843 fwd_streams[sm_id] = NULL;
1845 rte_free(fwd_streams);
1850 nb_fwd_streams = nb_fwd_streams_new;
1851 if (nb_fwd_streams) {
1852 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1853 sizeof(struct fwd_stream *) * nb_fwd_streams,
1854 RTE_CACHE_LINE_SIZE);
1855 if (fwd_streams == NULL)
1856 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1857 " (struct fwd_stream *)) failed\n",
1860 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1861 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1862 " struct fwd_stream", sizeof(struct fwd_stream),
1863 RTE_CACHE_LINE_SIZE);
1864 if (fwd_streams[sm_id] == NULL)
1865 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1866 "(struct fwd_stream) failed\n");
1874 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1876 uint64_t total_burst, sburst;
1878 uint64_t burst_stats[4];
1879 uint16_t pktnb_stats[4];
1881 int burst_percent[4], sburstp;
1885 * First compute the total number of packet bursts and the
1886 * two highest numbers of bursts of the same number of packets.
1888 memset(&burst_stats, 0x0, sizeof(burst_stats));
1889 memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1891 /* Show stats for 0 burst size always */
1892 total_burst = pbs->pkt_burst_spread[0];
1893 burst_stats[0] = pbs->pkt_burst_spread[0];
1896 /* Find the next 2 burst sizes with highest occurrences. */
1897 for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
1898 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1903 total_burst += nb_burst;
1905 if (nb_burst > burst_stats[1]) {
1906 burst_stats[2] = burst_stats[1];
1907 pktnb_stats[2] = pktnb_stats[1];
1908 burst_stats[1] = nb_burst;
1909 pktnb_stats[1] = nb_pkt;
1910 } else if (nb_burst > burst_stats[2]) {
1911 burst_stats[2] = nb_burst;
1912 pktnb_stats[2] = nb_pkt;
1915 if (total_burst == 0)
1918 printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1919 for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1921 printf("%d%% of other]\n", 100 - sburstp);
1925 sburst += burst_stats[i];
1926 if (sburst == total_burst) {
1927 printf("%d%% of %d pkts]\n",
1928 100 - sburstp, (int) pktnb_stats[i]);
1933 (double)burst_stats[i] / total_burst * 100;
1934 printf("%d%% of %d pkts + ",
1935 burst_percent[i], (int) pktnb_stats[i]);
1936 sburstp += burst_percent[i];
1941 fwd_stream_stats_display(streamid_t stream_id)
1943 struct fwd_stream *fs;
1944 static const char *fwd_top_stats_border = "-------";
1946 fs = fwd_streams[stream_id];
1947 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1948 (fs->fwd_dropped == 0))
1950 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1951 "TX Port=%2d/Queue=%2d %s\n",
1952 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1953 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1954 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1955 " TX-dropped: %-14"PRIu64,
1956 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1958 /* if checksum mode */
1959 if (cur_fwd_eng == &csum_fwd_engine) {
1960 printf(" RX- bad IP checksum: %-14"PRIu64
1961 " Rx- bad L4 checksum: %-14"PRIu64
1962 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1963 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1964 fs->rx_bad_outer_l4_csum);
1965 printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
1966 fs->rx_bad_outer_ip_csum);
1971 if (record_burst_stats) {
1972 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1973 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1978 fwd_stats_display(void)
1980 static const char *fwd_stats_border = "----------------------";
1981 static const char *acc_stats_border = "+++++++++++++++";
1983 struct fwd_stream *rx_stream;
1984 struct fwd_stream *tx_stream;
1985 uint64_t tx_dropped;
1986 uint64_t rx_bad_ip_csum;
1987 uint64_t rx_bad_l4_csum;
1988 uint64_t rx_bad_outer_l4_csum;
1989 uint64_t rx_bad_outer_ip_csum;
1990 } ports_stats[RTE_MAX_ETHPORTS];
1991 uint64_t total_rx_dropped = 0;
1992 uint64_t total_tx_dropped = 0;
1993 uint64_t total_rx_nombuf = 0;
1994 struct rte_eth_stats stats;
1995 uint64_t fwd_cycles = 0;
1996 uint64_t total_recv = 0;
1997 uint64_t total_xmit = 0;
1998 struct rte_port *port;
2003 memset(ports_stats, 0, sizeof(ports_stats));
2005 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2006 struct fwd_stream *fs = fwd_streams[sm_id];
2008 if (cur_fwd_config.nb_fwd_streams >
2009 cur_fwd_config.nb_fwd_ports) {
2010 fwd_stream_stats_display(sm_id);
2012 ports_stats[fs->tx_port].tx_stream = fs;
2013 ports_stats[fs->rx_port].rx_stream = fs;
2016 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
2018 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
2019 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
2020 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
2021 fs->rx_bad_outer_l4_csum;
2022 ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2023 fs->rx_bad_outer_ip_csum;
2025 if (record_core_cycles)
2026 fwd_cycles += fs->core_cycles;
2028 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2029 pt_id = fwd_ports_ids[i];
2030 port = &ports[pt_id];
2032 rte_eth_stats_get(pt_id, &stats);
2033 stats.ipackets -= port->stats.ipackets;
2034 stats.opackets -= port->stats.opackets;
2035 stats.ibytes -= port->stats.ibytes;
2036 stats.obytes -= port->stats.obytes;
2037 stats.imissed -= port->stats.imissed;
2038 stats.oerrors -= port->stats.oerrors;
2039 stats.rx_nombuf -= port->stats.rx_nombuf;
2041 total_recv += stats.ipackets;
2042 total_xmit += stats.opackets;
2043 total_rx_dropped += stats.imissed;
2044 total_tx_dropped += ports_stats[pt_id].tx_dropped;
2045 total_tx_dropped += stats.oerrors;
2046 total_rx_nombuf += stats.rx_nombuf;
2048 printf("\n %s Forward statistics for port %-2d %s\n",
2049 fwd_stats_border, pt_id, fwd_stats_border);
2051 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
2052 "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
2053 stats.ipackets + stats.imissed);
2055 if (cur_fwd_eng == &csum_fwd_engine) {
2056 printf(" Bad-ipcsum: %-14"PRIu64
2057 " Bad-l4csum: %-14"PRIu64
2058 "Bad-outer-l4csum: %-14"PRIu64"\n",
2059 ports_stats[pt_id].rx_bad_ip_csum,
2060 ports_stats[pt_id].rx_bad_l4_csum,
2061 ports_stats[pt_id].rx_bad_outer_l4_csum);
2062 printf(" Bad-outer-ipcsum: %-14"PRIu64"\n",
2063 ports_stats[pt_id].rx_bad_outer_ip_csum);
2065 if (stats.ierrors + stats.rx_nombuf > 0) {
2066 printf(" RX-error: %-"PRIu64"\n", stats.ierrors);
2067 printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
2070 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
2071 "TX-total: %-"PRIu64"\n",
2072 stats.opackets, ports_stats[pt_id].tx_dropped,
2073 stats.opackets + ports_stats[pt_id].tx_dropped);
2075 if (record_burst_stats) {
2076 if (ports_stats[pt_id].rx_stream)
2077 pkt_burst_stats_display("RX",
2078 &ports_stats[pt_id].rx_stream->rx_burst_stats);
2079 if (ports_stats[pt_id].tx_stream)
2080 pkt_burst_stats_display("TX",
2081 &ports_stats[pt_id].tx_stream->tx_burst_stats);
2084 printf(" %s--------------------------------%s\n",
2085 fwd_stats_border, fwd_stats_border);
2088 printf("\n %s Accumulated forward statistics for all ports"
2090 acc_stats_border, acc_stats_border);
2091 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
2093 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
2095 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
2096 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
2097 if (total_rx_nombuf > 0)
2098 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
2099 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
2101 acc_stats_border, acc_stats_border);
2102 if (record_core_cycles) {
2103 #define CYC_PER_MHZ 1E6
2104 if (total_recv > 0 || total_xmit > 0) {
2105 uint64_t total_pkts = 0;
2106 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
2107 strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
2108 total_pkts = total_xmit;
2110 total_pkts = total_recv;
2112 printf("\n CPU cycles/packet=%.2F (total cycles="
2113 "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
2115 (double) fwd_cycles / total_pkts,
2116 fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
2117 (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
2123 fwd_stats_reset(void)
2129 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2130 pt_id = fwd_ports_ids[i];
2131 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2133 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2134 struct fwd_stream *fs = fwd_streams[sm_id];
2138 fs->fwd_dropped = 0;
2139 fs->rx_bad_ip_csum = 0;
2140 fs->rx_bad_l4_csum = 0;
2141 fs->rx_bad_outer_l4_csum = 0;
2142 fs->rx_bad_outer_ip_csum = 0;
2144 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
2145 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2146 fs->core_cycles = 0;
2151 flush_fwd_rx_queues(void)
2153 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2160 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2161 uint64_t timer_period;
2163 if (num_procs > 1) {
2164 printf("multi-process not support for flushing fwd Rx queues, skip the below lines and return.\n");
2168 /* convert to number of cycles */
2169 timer_period = rte_get_timer_hz(); /* 1 second timeout */
2171 for (j = 0; j < 2; j++) {
2172 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2173 for (rxq = 0; rxq < nb_rxq; rxq++) {
2174 port_id = fwd_ports_ids[rxp];
2176 * testpmd can get stuck in the do-while loop below
2177 * if rte_eth_rx_burst() always returns nonzero
2178 * packets, so a timer is added to exit the loop
2179 * after the 1-second timer expires.
2181 prev_tsc = rte_rdtsc();
2183 nb_rx = rte_eth_rx_burst(port_id, rxq,
2184 pkts_burst, MAX_PKT_BURST);
2185 for (i = 0; i < nb_rx; i++)
2186 rte_pktmbuf_free(pkts_burst[i]);
2188 cur_tsc = rte_rdtsc();
2189 diff_tsc = cur_tsc - prev_tsc;
2190 timer_tsc += diff_tsc;
2191 } while ((nb_rx > 0) &&
2192 (timer_tsc < timer_period));
2196 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
2201 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2203 struct fwd_stream **fsm;
2206 #ifdef RTE_LIB_BITRATESTATS
2207 uint64_t tics_per_1sec;
2208 uint64_t tics_datum;
2209 uint64_t tics_current;
2210 uint16_t i, cnt_ports;
2212 cnt_ports = nb_ports;
2213 tics_datum = rte_rdtsc();
2214 tics_per_1sec = rte_get_timer_hz();
2216 fsm = &fwd_streams[fc->stream_idx];
2217 nb_fs = fc->stream_nb;
2219 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2220 (*pkt_fwd)(fsm[sm_id]);
2221 #ifdef RTE_LIB_BITRATESTATS
2222 if (bitrate_enabled != 0 &&
2223 bitrate_lcore_id == rte_lcore_id()) {
2224 tics_current = rte_rdtsc();
2225 if (tics_current - tics_datum >= tics_per_1sec) {
2226 /* Periodic bitrate calculation */
2227 for (i = 0; i < cnt_ports; i++)
2228 rte_stats_bitrate_calc(bitrate_data,
2230 tics_datum = tics_current;
2234 #ifdef RTE_LIB_LATENCYSTATS
2235 if (latencystats_enabled != 0 &&
2236 latencystats_lcore_id == rte_lcore_id())
2237 rte_latencystats_update();
2240 } while (! fc->stopped);
2244 start_pkt_forward_on_core(void *fwd_arg)
2246 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2247 cur_fwd_config.fwd_eng->packet_fwd);
2252 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2253 * Used to start communication flows in network loopback test configurations.
2256 run_one_txonly_burst_on_core(void *fwd_arg)
2258 struct fwd_lcore *fwd_lc;
2259 struct fwd_lcore tmp_lcore;
2261 fwd_lc = (struct fwd_lcore *) fwd_arg;
2262 tmp_lcore = *fwd_lc;
2263 tmp_lcore.stopped = 1;
2264 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2269 * Launch packet forwarding:
2270 * - Set up the per-port forwarding context.
2271 * - Launch logical cores with their forwarding configuration.
2274 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2280 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2281 lc_id = fwd_lcores_cpuids[i];
2282 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2283 fwd_lcores[i]->stopped = 0;
2284 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2285 fwd_lcores[i], lc_id);
2288 "launch lcore %u failed - diag=%d\n",
2295 * Launch packet forwarding configuration.
2298 start_packet_forwarding(int with_tx_first)
2300 port_fwd_begin_t port_fwd_begin;
2301 port_fwd_end_t port_fwd_end;
2304 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2305 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
2307 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2308 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
2310 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2311 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2312 (!nb_rxq || !nb_txq))
2313 rte_exit(EXIT_FAILURE,
2314 "Either rxq or txq are 0, cannot use %s fwd mode\n",
2315 cur_fwd_eng->fwd_mode_name);
2317 if (all_ports_started() == 0) {
2318 fprintf(stderr, "Not all ports were started\n");
2321 if (test_done == 0) {
2322 fprintf(stderr, "Packet forwarding already started\n");
2328 pkt_fwd_config_display(&cur_fwd_config);
2329 if (!pkt_fwd_shared_rxq_check())
2332 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2333 if (port_fwd_begin != NULL) {
2334 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2335 if (port_fwd_begin(fwd_ports_ids[i])) {
2337 "Packet forwarding is not ready\n");
2343 if (with_tx_first) {
2344 port_fwd_begin = tx_only_engine.port_fwd_begin;
2345 if (port_fwd_begin != NULL) {
2346 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2347 if (port_fwd_begin(fwd_ports_ids[i])) {
2349 "Packet forwarding is not ready\n");
2359 flush_fwd_rx_queues();
2361 rxtx_config_display();
2364 if (with_tx_first) {
2365 while (with_tx_first--) {
2366 launch_packet_forwarding(
2367 run_one_txonly_burst_on_core);
2368 rte_eal_mp_wait_lcore();
2370 port_fwd_end = tx_only_engine.port_fwd_end;
2371 if (port_fwd_end != NULL) {
2372 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2373 (*port_fwd_end)(fwd_ports_ids[i]);
2376 launch_packet_forwarding(start_pkt_forward_on_core);
2380 stop_packet_forwarding(void)
2382 port_fwd_end_t port_fwd_end;
2388 fprintf(stderr, "Packet forwarding not started\n");
2391 printf("Telling cores to stop...");
2392 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2393 fwd_lcores[lc_id]->stopped = 1;
2394 printf("\nWaiting for lcores to finish...\n");
2395 rte_eal_mp_wait_lcore();
2396 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2397 if (port_fwd_end != NULL) {
2398 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2399 pt_id = fwd_ports_ids[i];
2400 (*port_fwd_end)(pt_id);
2404 fwd_stats_display();
2406 printf("\nDone.\n");
2411 dev_set_link_up(portid_t pid)
2413 if (rte_eth_dev_set_link_up(pid) < 0)
2414 fprintf(stderr, "\nFailed to set link up.\n");
2418 dev_set_link_down(portid_t pid)
2420 if (rte_eth_dev_set_link_down(pid) < 0)
2421 fprintf(stderr, "\nFailed to set link down.\n");
2425 all_ports_started(void)
2428 struct rte_port *port;
2430 RTE_ETH_FOREACH_DEV(pi) {
2432 /* Check if there is a port which is not started */
2433 if ((port->port_status != RTE_PORT_STARTED) &&
2434 (port->slave_flag == 0))
2438 /* All ports are started */
2443 port_is_stopped(portid_t port_id)
2445 struct rte_port *port = &ports[port_id];
2447 if ((port->port_status != RTE_PORT_STOPPED) &&
2448 (port->slave_flag == 0))
2454 all_ports_stopped(void)
2458 RTE_ETH_FOREACH_DEV(pi) {
2459 if (!port_is_stopped(pi))
2467 port_is_started(portid_t port_id)
2469 if (port_id_is_invalid(port_id, ENABLED_WARN))
2472 if (ports[port_id].port_status != RTE_PORT_STARTED)

/* Configure the Rx and Tx hairpin queues for the selected port. */
static int
setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];
	uint16_t peer_rx_port = pi;
	uint16_t peer_tx_port = pi;
	uint32_t manual = 1;
	uint32_t tx_exp = hairpin_mode & 0x10;

	if (!(hairpin_mode & 0xf)) {
		peer_rx_port = pi;
		peer_tx_port = pi;
		manual = 0;
	} else if (hairpin_mode & 0x1) {
		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
							  RTE_ETH_DEV_NO_OWNER);
		if (peer_tx_port >= RTE_MAX_ETHPORTS)
			peer_tx_port = rte_eth_find_next_owned_by(0,
						RTE_ETH_DEV_NO_OWNER);
		if (p_pi != RTE_MAX_ETHPORTS) {
			peer_rx_port = p_pi;
		} else {
			uint16_t next_pi;

			/* Last port will be the peer RX port of the first. */
			RTE_ETH_FOREACH_DEV(next_pi)
				peer_rx_port = next_pi;
		}
		manual = 1;
	} else if (hairpin_mode & 0x2) {
		if (cnt_pi & 0x1) {
			peer_rx_port = p_pi;
		} else {
			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
						RTE_ETH_DEV_NO_OWNER);
			if (peer_rx_port >= RTE_MAX_ETHPORTS)
				peer_rx_port = pi;
		}
		peer_tx_port = peer_rx_port;
		manual = 1;
	}

	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = peer_rx_port;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up the Tx hairpin queue, return */
		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
		else
			fprintf(stderr,
				"Port %d can not be set back to stopped\n", pi);
		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
			pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = peer_tx_port;
		hairpin_conf.peers[0].queue = i + nb_txq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up the Rx hairpin queue, return */
		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
		else
			fprintf(stderr,
				"Port %d can not be set back to stopped\n", pi);
		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
			pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
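
/*
 * Minimal sketch (not part of testpmd): the simplest hairpin wiring handled
 * above, one Tx hairpin queue looped back to the same port with implicit
 * (device-managed) binding. Queue id and descriptor count come from the
 * caller; nothing here is validated.
 */
static __rte_unused int
example_loopback_hairpin_txq(portid_t pi, queueid_t qi)
{
	struct rte_eth_hairpin_conf conf = {
		.peer_count = 1,
	};

	conf.peers[0].port = pi;	/* peer is the port itself */
	conf.peers[0].queue = qi;
	conf.manual_bind = 0;		/* bound automatically on device start */
	conf.tx_explicit = 0;
	return rte_eth_tx_hairpin_queue_setup(pi, qi, nb_txd, &conf);
}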

/* Configure the Rx with optional split. */
int
rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
	       uint16_t nb_rx_desc, unsigned int socket_id,
	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
{
	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
	unsigned int i, mp_n;
	int ret;

	if (rx_pkt_nb_segs <= 1 ||
	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
		rx_conf->rx_seg = NULL;
		rx_conf->rx_nseg = 0;
		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
					     nb_rx_desc, socket_id,
					     rx_conf, mp);
		return ret;
	}
	for (i = 0; i < rx_pkt_nb_segs; i++) {
		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
		struct rte_mempool *mpx;
		/*
		 * Use the last valid pool for the segments whose number
		 * exceeds the pool index (">=" keeps the index in range).
		 */
		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
		mpx = mbuf_pool_find(socket_id, mp_n);
		/* Handle zero as mbuf data buffer size. */
		rx_seg->length = rx_pkt_seg_lengths[i] ?
				 rx_pkt_seg_lengths[i] :
				 mbuf_data_size[mp_n];
		rx_seg->offset = i < rx_pkt_nb_offs ?
				 rx_pkt_seg_offsets[i] : 0;
		rx_seg->mp = mpx ? mpx : mp;
	}
	rx_conf->rx_nseg = rx_pkt_nb_segs;
	rx_conf->rx_seg = rx_useg;
	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
				     socket_id, rx_conf, NULL);
	rx_conf->rx_seg = NULL;
	rx_conf->rx_nseg = 0;
	return ret;
}
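
/*
 * Illustrative sketch (not part of testpmd): what the rx_useg[] array built
 * above looks like for a fixed two-segment split, a small header segment
 * followed by the packet remainder. The pool arguments are assumptions.
 */
static __rte_unused void
example_two_segment_split(union rte_eth_rxseg *seg,
			  struct rte_mempool *hdr_mp,
			  struct rte_mempool *pay_mp)
{
	seg[0].split.length = 128;	/* first 128 bytes of each packet */
	seg[0].split.offset = 0;
	seg[0].split.mp = hdr_mp;
	seg[1].split.length = 0;	/* 0: use the pool's data room size */
	seg[1].split.offset = 0;
	seg[1].split.mp = pay_mp;
}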

static int
alloc_xstats_display_info(portid_t pi)
{
	uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
	uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
	uint64_t **curr_values = &ports[pi].xstats_info.curr_values;

	if (xstats_display_num == 0)
		return 0;

	*ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
	if (*ids_supp == NULL)
		goto fail_ids_supp;

	*prev_values = calloc(xstats_display_num,
			      sizeof(**prev_values));
	if (*prev_values == NULL)
		goto fail_prev_values;

	*curr_values = calloc(xstats_display_num,
			      sizeof(**curr_values));
	if (*curr_values == NULL)
		goto fail_curr_values;

	ports[pi].xstats_info.allocated = true;

	return 0;

fail_curr_values:
	free(*prev_values);
fail_prev_values:
	free(*ids_supp);
fail_ids_supp:
	return -ENOMEM;
}

static void
free_xstats_display_info(portid_t pi)
{
	if (!ports[pi].xstats_info.allocated)
		return;
	free(ports[pi].xstats_info.ids_supp);
	free(ports[pi].xstats_info.prev_values);
	free(ports[pi].xstats_info.curr_values);
	ports[pi].xstats_info.allocated = false;
}

/** Fill helper structures for specified port to show extended statistics. */
static void
fill_xstats_display_info_for_port(portid_t pi)
{
	unsigned int stat, stat_supp;
	const char *xstat_name;
	struct rte_port *port;
	uint64_t *ids_supp;
	int rc;

	if (xstats_display_num == 0)
		return;

	if (pi == (portid_t)RTE_PORT_ALL) {
		fill_xstats_display_info();
		return;
	}

	port = &ports[pi];
	if (port->port_status != RTE_PORT_STARTED)
		return;

	if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
		rte_exit(EXIT_FAILURE,
			 "Failed to allocate xstats display memory\n");

	ids_supp = port->xstats_info.ids_supp;
	for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
		xstat_name = xstats_display[stat].name;
		rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
						   ids_supp + stat_supp);
		if (rc != 0) {
			fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n",
				xstat_name, pi, stat);
			continue;
		}
		stat_supp++;
	}

	port->xstats_info.ids_supp_sz = stat_supp;
}

/** Fill helper structures for all ports to show extended statistics. */
static void
fill_xstats_display_info(void)
{
	portid_t pi;

	if (xstats_display_num == 0)
		return;

	RTE_ETH_FOREACH_DEV(pi)
		fill_xstats_display_info_for_port(pi);
}
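
/*
 * Illustrative sketch (not part of testpmd): consuming the ids_supp array
 * resolved above with the bulk xstats read API; "values" must hold at least
 * ids_supp_sz entries.
 */
static __rte_unused int
example_read_selected_xstats(portid_t pi, uint64_t *values)
{
	return rte_eth_xstats_get_by_id(pi, ports[pi].xstats_info.ids_supp,
					values,
					ports[pi].xstats_info.ids_supp_sz);
}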

int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	portid_t p_pi = RTE_MAX_ETHPORTS;
	portid_t pl[RTE_MAX_ETHPORTS];
	portid_t peer_pl[RTE_MAX_ETHPORTS];
	uint16_t cnt_pi = 0;
	uint16_t cfg_pi = 0;
	int peer_pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_eth_hairpin_cap cap;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (port->port_status == RTE_PORT_STOPPED)
			port->port_status = RTE_PORT_HANDLING;
		else {
			fprintf(stderr, "Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			struct rte_eth_conf dev_conf;
			int k;

			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					fprintf(stderr,
						"Failed to apply isolated mode on port %d\n",
						pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			if (nb_hairpinq > 0 &&
			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
				fprintf(stderr,
					"Port %d doesn't support hairpin queues\n",
					pi);
				return -1;
			}

			/* configure port */
			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
						    nb_txq + nb_hairpinq,
						    &(port->dev_conf));
			if (diag != 0) {
				if (port->port_status == RTE_PORT_HANDLING)
					port->port_status = RTE_PORT_STOPPED;
				else
					fprintf(stderr,
						"Port %d can not be set back to stopped\n",
						pi);
				fprintf(stderr, "Fail to configure port %d\n",
					pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
			/* get device configuration */
			if (0 !=
				eth_dev_conf_get_print_err(pi, &dev_conf)) {
				fprintf(stderr,
					"port %d can not get device configuration\n",
					pi);
				return -1;
			}
			/* Apply Rx offloads configuration */
			if (dev_conf.rxmode.offloads !=
			    port->dev_conf.rxmode.offloads) {
				port->dev_conf.rxmode.offloads |=
					dev_conf.rxmode.offloads;
				for (k = 0;
				     k < port->dev_info.max_rx_queues;
				     k++)
					port->rx_conf[k].offloads |=
						dev_conf.rxmode.offloads;
			}
			/* Apply Tx offloads configuration */
			if (dev_conf.txmode.offloads !=
			    port->dev_conf.txmode.offloads) {
				port->dev_conf.txmode.offloads |=
					dev_conf.txmode.offloads;
				for (k = 0;
				     k < port->dev_info.max_tx_queues;
				     k++)
					port->tx_conf[k].offloads |=
						dev_conf.txmode.offloads;
			}
		}
		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (port->port_status == RTE_PORT_HANDLING)
					port->port_status = RTE_PORT_STOPPED;
				else
					fprintf(stderr,
						"Port %d can not be set back to stopped\n",
						pi);
				fprintf(stderr,
					"Fail to configure port %d tx queues\n",
					pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find
							(rxring_numa[pi], 0);
					if (mp == NULL) {
						fprintf(stderr,
							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find
							(port->socket_id, 0);
					if (mp == NULL) {
						fprintf(stderr,
							"Failed to setup RX queue: No mempool allocation on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (port->port_status == RTE_PORT_HANDLING)
					port->port_status = RTE_PORT_STOPPED;
				else
					fprintf(stderr,
						"Port %d can not be set back to stopped\n",
						pi);
				fprintf(stderr,
					"Fail to configure port %d rx queues\n",
					pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup hairpin queues */
			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
				return -1;
		}
		configure_rxtx_dump_callbacks(verbose_level);
		if (clear_ptypes) {
			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
					NULL, 0);
			if (diag < 0)
				fprintf(stderr,
					"Port %d: Failed to disable Ptype parsing\n",
					pi);
		}

		p_pi = pi;
		cnt_pi++;

		/* start port */
		diag = eth_dev_start_mp(pi);
		if (diag < 0) {
			fprintf(stderr, "Fail to start port %d: %s\n",
				pi, rte_strerror(-diag));

			/* Failed to start the port, roll the status back */
			if (port->port_status == RTE_PORT_HANDLING)
				port->port_status = RTE_PORT_STOPPED;
			else
				fprintf(stderr,
					"Port %d can not be set back to stopped\n",
					pi);
			continue;
		}

		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STARTED;
		else
			fprintf(stderr, "Port %d can not be set into started\n",
				pi);

		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
				RTE_ETHER_ADDR_BYTES(&port->eth_addr));

		/* at least one port started, need checking link status */
		need_check_link_status = 1;

		pl[cfg_pi++] = pi;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		fprintf(stderr, "Please stop the ports first\n");

	if (hairpin_mode & 0xf) {
		uint16_t i;
		int j;

		/* bind all started hairpin ports */
		for (i = 0; i < cfg_pi; i++) {
			pi = pl[i];
			/* bind current Tx to all peer Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 1);
			if (peer_pi < 0)
				return peer_pi;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
				if (diag < 0) {
					fprintf(stderr,
						"Error during binding hairpin Tx port %u to %u: %s\n",
						pi, peer_pl[j],
						rte_strerror(-diag));
					return -1;
				}
			}
			/* bind all peer Tx to current Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 0);
			if (peer_pi < 0)
				return peer_pi;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
				if (diag < 0) {
					fprintf(stderr,
						"Error during binding hairpin Tx port %u to %u: %s\n",
						peer_pl[j], pi,
						rte_strerror(-diag));
					return -1;
				}
			}
		}
	}

	fill_xstats_display_info_for_port(pid);

	printf("Done\n");
	return 0;
}

void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;
	portid_t peer_pl[RTE_MAX_ETHPORTS];
	int peer_pi;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		port = &ports[pi];
		if (port->port_status == RTE_PORT_STARTED)
			port->port_status = RTE_PORT_HANDLING;
		else
			continue;

		if (hairpin_mode & 0xf) {
			int j;

			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
			/* unbind all peer Tx from current Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 0);
			if (peer_pi < 0)
				continue;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				rte_eth_hairpin_unbind(peer_pl[j], pi);
			}
		}

		if (port->flow_list)
			port_flow_flush(pi);

		if (eth_dev_stop_mp(pi) != 0)
			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
				pi);

		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
		else
			fprintf(stderr, "Port %d can not be set into stopped\n",
				pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
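
/*
 * Illustrative sketch (not part of testpmd): the stop/start lifecycle driven
 * by start_port() and stop_port() above, as the "port stop all" /
 * "port start all" CLI commands do.
 */
static __rte_unused void
example_restart_all_ports(void)
{
	stop_port(RTE_PORT_ALL);	/* RTE_PORT_STARTED -> RTE_PORT_STOPPED */
	if (start_port(RTE_PORT_ALL) != 0)
		fprintf(stderr, "Restarting the ports failed\n");
}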

static void
remove_invalid_ports_in(portid_t *array, portid_t *total)
{
	portid_t i;
	portid_t new_total = 0;

	for (i = 0; i < *total; i++)
		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
			array[new_total] = array[i];
			new_total++;
		}
	*total = new_total;
}

void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}

void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		port = &ports[pi];
		if (port->port_status == RTE_PORT_CLOSED) {
			fprintf(stderr, "Port %d is already closed\n", pi);
			continue;
		}

		if (is_proc_primary()) {
			port_flow_flush(pi);
			port_flex_item_flush(pi);
			rte_eth_dev_close(pi);
		}

		free_xstats_display_info(pi);
	}

	remove_invalid_ports();
	printf("Done\n");
}

void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
		fprintf(stderr,
			"Can not reset port(s), please stop port(s) first.\n");
		return;
	}

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			fprintf(stderr, "Failed to reset port %d. diag=%d\n",
				pi, diag);
		}
	}

	printf("Done\n");
}

void
attach_port(char *identifier)
{
	portid_t pi;
	struct rte_dev_iterator iterator;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		fprintf(stderr, "Invalid parameters are specified\n");
		return;
	}

	if (rte_dev_probe(identifier) < 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
		return;
	}

	/* first attach mode: event */
	if (setup_on_probe_event) {
		/* new ports are detected on RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
					ports[pi].need_setup != 0)
				setup_attached_port(pi);
		return;
	}

	/* second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* setup ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
	}
}

static void
setup_attached_port(portid_t pi)
{
	unsigned int socket_id;
	int ret;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	ret = rte_eth_promiscuous_enable(pi);
	if (ret != 0)
		fprintf(stderr,
			"Error during enabling promiscuous mode for port %u: %s - ignore\n",
			pi, rte_strerror(-ret));

	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}

static void
detach_device(struct rte_device *dev)
{
	portid_t sibling;

	if (dev == NULL) {
		fprintf(stderr, "Device already removed\n");
		return;
	}

	printf("Removing a device...\n");

	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
				fprintf(stderr, "Port %u not stopped\n",
					sibling);
				return;
			}
			port_flow_flush(sibling);
		}
	}

	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	remove_invalid_ports();

	printf("Device is detached\n");
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
}

void
detach_port_device(portid_t port_id)
{
	int ret;
	struct rte_eth_dev_info dev_info;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			fprintf(stderr, "Port not stopped\n");
			return;
		}
		fprintf(stderr, "Port was not closed\n");
	}

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0) {
		TESTPMD_LOG(ERR,
			"Failed to get device info for port %d, not detaching\n",
			port_id);
		return;
	}
	detach_device(dev_info.device);
}

void
detach_devargs(char *identifier)
{
	struct rte_dev_iterator iterator;
	struct rte_devargs da;
	portid_t port_id;

	printf("Removing a device...\n");

	memset(&da, 0, sizeof(da));
	if (rte_devargs_parsef(&da, "%s", identifier)) {
		fprintf(stderr, "cannot parse identifier\n");
		return;
	}

	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
				fprintf(stderr, "Port %u not stopped\n",
					port_id);
				rte_eth_iterator_cleanup(&iterator);
				rte_devargs_reset(&da);
				return;
			}
			port_flow_flush(port_id);
		}
	}

	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
			    da.name, da.bus->name);
		rte_devargs_reset(&da);
		return;
	}

	remove_invalid_ports();

	printf("Device %s is detached\n", identifier);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	rte_devargs_reset(&da);
}
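
/*
 * Illustrative sketch (not part of testpmd): a hotplug round trip through
 * the helpers above. The "net_null0" vdev identifier is an assumption made
 * for this example only.
 */
static __rte_unused void
example_hotplug_roundtrip(void)
{
	char id[] = "net_null0";	/* hypothetical vdev identifier */

	attach_port(id);
	detach_devargs(id);
}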

void
pmd_test_exit(void)
{
	portid_t pt_id;
	unsigned int i;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

#ifndef RTE_EXEC_ENV_WINDOWS
	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
		if (mempools[i]) {
			if (mp_alloc_type == MP_ALLOC_ANON)
				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
						     NULL);
		}
	}
#endif
	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to stop device event monitor.\n");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"fail to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to disable hotplug handling.\n");
			return;
		}
	}
	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
		if (mempools[i])
			mempool_free_mp(mempools[i]);
	}
	free(xstats_display);

	printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Check the link status of all ports in up to 9 s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					fprintf(stderr,
						"Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status,
					sizeof(link_status), &link);
				printf("Port %d %s\n", portid, link_status);
				continue;
			}
			/* clear all_ports_up flag if any link is down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link statuses, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports are up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}
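
/*
 * Illustrative sketch (not part of testpmd): the one-shot, possibly blocking
 * counterpart of the polling loop above for a single port.
 */
static __rte_unused void
example_print_one_link(portid_t portid)
{
	char buf[RTE_ETH_LINK_MAX_STR_LEN];
	struct rte_eth_link link;

	/* rte_eth_link_get() may wait for link-up, unlike the nowait variant */
	if (rte_eth_link_get(portid, &link) == 0) {
		rte_eth_link_to_str(buf, sizeof(buf), &link);
		printf("Port %u %s\n", portid, buf);
	}
}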

static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		TESTPMD_LOG(ERR,
			"Failed to get device info for port %d, not detaching\n",
			port_id);
	else {
		struct rte_device *device = dev_info.device;
		close_port(port_id);
		detach_device(device); /* might already be removed or have more ports */
	}
	if (need_to_start)
		start_packet_forwarding(0);
}

/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		   void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr,
			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr,
				"Could not set up deferred device removal\n");
		break;
	case RTE_ETH_EVENT_DESTROY:
		ports[port_id].port_status = RTE_PORT_CLOSED;
		printf("Port %u is closed\n", port_id);
		break;
	default:
		break;
	}
	return 0;
}

static int
register_eth_event_callback(void)
{
	int ret;
	enum rte_eth_event_type event;

	for (event = RTE_ETH_EVENT_UNKNOWN;
			event < RTE_ETH_EVENT_MAX; event++) {
		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
				event,
				eth_event_callback,
				NULL);
		if (ret != 0) {
			TESTPMD_LOG(ERR, "Failed to register callback for "
					"%s event\n", eth_event_desc[event]);
			return -1;
		}
	}

	return 0;
}

/* This function is used by the interrupt thread */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
		   __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * Because the user's callback is invoked in the EAL interrupt
		 * callback, the interrupt callback must finish before it can
		 * be unregistered when detaching the device. So finish the
		 * callback quickly and detach the device with a deferred
		 * removal instead. This is a workaround; once device
		 * detaching moves into the EAL, the deferred removal can be
		 * deleted.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finish kernel driver binding,
		 * begin to attach port.
		 */
		break;
	default:
		break;
	}
}

static void
rxtx_port_config(portid_t pid)
{
	uint16_t qid;
	uint64_t offloads;
	struct rte_port *port = &ports[pid];

	for (qid = 0; qid < nb_rxq; qid++) {
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;

		if (rxq_share > 0 &&
		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
			/* Non-zero share group to enable RxQ share. */
			port->rx_conf[qid].share_group = pid / rxq_share + 1;
			port->rx_conf[qid].share_qid = qid; /* Equal mapping. */
		}

		if (offloads != 0)
			port->rx_conf[qid].offloads = offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		if (offloads != 0)
			port->tx_conf[qid].offloads = offloads;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}

/*
 * Helper function to set MTU from frame size
 *
 * port->dev_info should be set before calling this function.
 *
 * return 0 on success, negative on error
 */
int
update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
{
	struct rte_port *port = &ports[portid];
	uint32_t eth_overhead;
	uint16_t mtu, new_mtu;

	eth_overhead = get_eth_overhead(&port->dev_info);

	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
		printf("Failed to get MTU for port %u\n", portid);
		return -1;
	}

	new_mtu = max_rx_pktlen - eth_overhead;

	if (mtu == new_mtu)
		return 0;

	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
		fprintf(stderr,
			"Failed to set MTU to %u for port %u\n",
			new_mtu, portid);
		return -1;
	}

	port->dev_conf.rxmode.mtu = new_mtu;

	return 0;
}
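
/*
 * Illustrative sketch (not part of testpmd): the arithmetic used above.
 * With a 1518-byte max frame and 18 bytes of L2 overhead (Ethernet header
 * plus CRC, no VLAN), the resulting MTU is the classic 1500.
 */
static __rte_unused uint16_t
example_mtu_for_frame(uint32_t max_rx_pktlen, uint32_t l2_overhead)
{
	return (uint16_t)(max_rx_pktlen - l2_overhead);	/* 1518 - 18 = 1500 */
}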

static void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	int ret, i;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			return;

		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
				port->dev_conf.rxmode.mq_mode =
					(enum rte_eth_rx_mq_mode)
						(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
			} else {
				port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
				port->dev_conf.rxmode.offloads &=
						~RTE_ETH_RX_OFFLOAD_RSS_HASH;

				for (i = 0;
				     i < port->dev_info.nb_rx_queues;
				     i++)
					port->rx_conf[i].offloads &=
						~RTE_ETH_RX_OFFLOAD_RSS_HASH;
			}
		}

		rxtx_port_config(pid);

		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		if (ret != 0)
			return;

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}

void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;
	struct rte_eth_dev_info dev_info;
	int ret;

	port = &ports[slave_pid];
	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
	if (ret != 0) {
		TESTPMD_LOG(ERR,
			"Failed to get device info for port id %d, "
			"cannot determine if the port is a bonded slave\n",
			slave_pid);
		return 0;
	}
	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
		return 1;
	return 0;
}

const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};

static int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;

	return 0;
}

int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	if (num_procs > 1) {
		printf("The multi-process feature doesn't support dcb.\n");
		return -ENOTSUP;
	}
	rte_port = &ports[pid];

	/* retain the original device configuration. */
	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	/* remove RSS HASH offload for DCB in vt mode */
	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
		for (i = 0; i < nb_rxq; i++)
			rte_port->rx_conf[i].offloads &=
				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
	}

	/* re-configure the device. */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
	if (retval < 0)
		return retval;
	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
	if (retval != 0)
		return retval;

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		fprintf(stderr,
			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
			pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if VT is disabled, use all PF queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(pid);

	rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
	if (retval != 0)
		return retval;

	rte_port->dcb_flag = 1;

	/* Enter DCB configuration status */
	dcb_config = 1;

	return 0;
}
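
/*
 * Illustrative sketch (not part of testpmd): enabling plain DCB (no virtual
 * tunnels) with four traffic classes and PFC on port 0, as the
 * "port config 0 dcb vt off 4 pfc on" CLI command does. The hardcoded port
 * id is an assumption for the example.
 */
static __rte_unused int
example_enable_dcb_port0(void)
{
	return init_port_dcb_config(0, DCB_ENABLED, RTE_ETH_4_TCS, 1);
}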

static void
init_port(void)
{
	int i;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		ports[i].xstats_info.allocated = false;
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		LIST_INIT(&ports[i].flow_tunnel_list);
	/* Initialize ports NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}

static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move cursor to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);

	fflush(stdout);
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
			signum);
#ifdef RTE_LIB_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		/* Set flag to indicate the forced termination. */
		f_quit = 1;
		/* exit with the expected status */
#ifndef RTE_EXEC_ENV_WINDOWS
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
#endif
	}
}

int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");

#ifdef RTE_LIB_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIB_BITRATESTATS
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIB_LATENCYSTATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

#ifndef RTE_EXEC_ENV_WINDOWS
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}
#endif

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		fprintf(stderr,
			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		fprintf(stderr,
			"Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		fprintf(stderr,
			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent fully testing it.\n",
			nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to enable hotplug handling.\n");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to start device event monitoring.\n");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"fail to register device event callback\n");
			return -1;
		}
	}

	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			fprintf(stderr,
				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
				port_id, rte_strerror(-ret));
	}

#ifdef RTE_LIB_METRICS
	/* Init metrics library */
	rte_metrics_init(rte_socket_id());
#endif

#ifdef RTE_LIB_LATENCYSTATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			fprintf(stderr,
				"Warning: latencystats init() returned error %d\n",
				ret);
		fprintf(stderr, "Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIB_BITRATESTATS
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIB_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				rte_delay_us_sleep(US_PER_S);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	ret = rte_eal_cleanup();
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			 "EAL cleanup failed: %s\n", strerror(-ret));

	return EXIT_SUCCESS;
}