/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
#ifndef RTE_EXEC_ENV_WINDOWS
#include <sys/types.h>
#include <sys/queue.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_pmd_ixgbe.h>
#include <rte_pdump.h>
#include <rte_metrics.h>
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#ifdef RTE_EXEC_ENV_WINDOWS
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#define HUGE_FLAG MAP_HUGETLB
#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#define HUGE_SHIFT MAP_HUGE_SHIFT
#define EXTMEM_HEAP_NAME "extmem"
#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */
/* use main core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
uint8_t numa_support = 1; /**< numa enabled by default */
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
uint8_t socket_num = UMA_NO_CONFIG;
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *   anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
 * Store the sockets on which the memory pools used by each port are allocated.
uint8_t port_numa[RTE_MAX_ETHPORTS];
 * Store the sockets on which the RX rings used by each port are allocated.
uint8_t rxring_numa[RTE_MAX_ETHPORTS];
 * Store the sockets on which the TX rings used by each port are allocated.
uint8_t txring_numa[RTE_MAX_ETHPORTS];
 * Record the Ethernet address of peer target ports to which packets are
 * Must be instantiated with the Ethernet addresses of peer traffic generator
struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
 * Probed Target Environment.
struct rte_port *ports;        /**< For all probed Ethernet ports. */
portid_t nb_ports;             /**< Number of probed Ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
 * Forwarding engines.
struct fwd_engine * fwd_engines[] = {
	&five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
	DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
				     * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */
/** Extended statistics to show. */
struct rte_eth_xstat_name *xstats_display;
unsigned int xstats_display_num; /**< Size of extended statistics to show */
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated from outside. Set a flag to exit the stats-period loop after
 * SIGINT/SIGTERM is received.
 * Max Rx frame size, set by '--max-pkt-len' parameter.
uint32_t max_rx_pkt_len;
 * Configuration of packet segments used to scatter received packets
 * if some of the split features are configured.
uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
 * Configuration of packet segments used by the "txonly" processing engine.
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */
uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */
uint32_t tx_pkt_times_inter;
/**< Timings for send scheduling in TXONLY mode, time between bursts. */
uint32_t tx_pkt_times_intra;
/**< Timings for send scheduling in TXONLY mode, time between packets. */
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* whether the current configuration is in DCB mode; 0 means not in DCB mode */
uint8_t dcb_config = 0;
 * Configurable number of RX/TX queues.
queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
#define RTE_PMD_PARAM_UNSET -1
 * Configurable values of RX and TX ring threshold registers.
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
 * Configurable value of RX free threshold.
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
 * Configurable value of RX drop enable.
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
 * Configurable value of TX free threshold.
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
 * Configurable value of TX RS bit threshold.
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
 * Configurable value of buffered packets before sending.
uint16_t noisy_tx_sw_bufsz;
 * Configurable value of packet buffer timeout.
uint16_t noisy_tx_sw_buf_flush_time;
 * Configurable value for size of VNF internal memory area
 * used for simulating noisy neighbour behaviour.
uint64_t noisy_lkup_mem_sz;
 * Configurable value of number of random writes done in
 * VNF simulation memory area.
uint64_t noisy_lkup_num_writes;
 * Configurable value of number of random reads done in
 * VNF simulation memory area.
uint64_t noisy_lkup_num_reads;
 * Configurable value of number of random reads/writes done in
 * VNF simulation memory area.
uint64_t noisy_lkup_num_reads_writes;
 * Receive Side Scaling (RSS) configuration.
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
 * Port topology configuration
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
 * Avoid flushing all the RX streams before starting forwarding.
uint8_t no_flush_rx = 0; /* flush by default */
 * Flow API isolated mode.
uint8_t flow_isolate_all;
 * Avoid checking the link status when starting/stopping a port.
uint8_t no_link_check = 0; /* check by default */
 * Don't automatically start all ports in interactive mode.
uint8_t no_device_start = 0;
 * Enable link status change notification.
uint8_t lsc_interrupt = 1; /* enabled by default */
 * Enable device removal notification.
uint8_t rmv_interrupt = 1; /* enabled by default */
uint8_t hot_plug = 0; /**< hotplug disabled by default. */
/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;
/* Clear ptypes on port initialization. */
uint8_t clear_ptypes = true;
/* Hairpin ports configuration mode. */
uint16_t hairpin_mode;
/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
	[RTE_ETH_EVENT_INTR_RESET] = "reset",
	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
	[RTE_ETH_EVENT_IPSEC] = "IPsec",
	[RTE_ETH_EVENT_MACSEC] = "MACsec",
	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
	[RTE_ETH_EVENT_NEW] = "device probed",
	[RTE_ETH_EVENT_DESTROY] = "device released",
	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
	[RTE_ETH_EVENT_MAX] = NULL,
 * Display or mask ether events
 * Default to all events except VF_MBOX
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
 * Decide whether all memory is locked for performance.
 * NIC bypass mode configuration options.
#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#ifdef RTE_LIB_LATENCYSTATS
 * Set when latency stats are enabled on the command line.
uint8_t latencystats_enabled;
 * Lcore ID to serve latency statistics.
lcoreid_t latencystats_lcore_id = -1;
 * Ethernet device configuration.
struct rte_eth_rxmode rx_mode;
struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.vlan_tci_mask = 0xFFEF,
	.src_ip = 0xFFFFFFFF,
	.dst_ip = 0xFFFFFFFF,
	.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	.src_port_mask = 0xFFFF,
	.dst_port_mask = 0xFFFF,
	.mac_addr_byte_mask = 0xFF,
	.tunnel_type_mask = 1,
	.tunnel_id_mask = 0xFFFFFFFF,
volatile int test_done = 1; /* stop packet forwarding when set to 1. */
 * Display zero values by default for xstats
uint8_t xstats_hide_zero;
 * Measurement of CPU cycles disabled by default
uint8_t record_core_cycles;
 * Display of RX and TX bursts disabled by default
uint8_t record_burst_stats;
 * Number of ports per shared Rx queue group; 0 disables the feature.
unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];
#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
 * Hexadecimal bitmask of RX mq modes that can be enabled.
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
 * Used to set forced link speed
uint32_t eth_link_speed;
 * ID of the current process in multi-process mode, used to
 * configure the queues to be polled.
 * Number of processes in multi-process mode, used to
 * configure the queues to be polled.
unsigned int num_procs = 1;
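/*
 * Negotiate delivery of Rx metadata (flow FLAG, flow MARK and tunnel ID)
 * with the PMD; only the primary process performs the negotiation.
 */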
eth_rx_metadata_negotiate_mp(uint16_t port_id)
	uint64_t rx_meta_features = 0;
	if (!is_proc_primary())
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
	rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
	rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
	ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
			TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
			TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
		if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
			TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
	} else if (ret != -ENOTSUP) {
		rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
			 port_id, rte_strerror(-ret));
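/*
 * Pick the ethdev that should act as the "transfer proxy" for flow rules
 * created on this port; the port itself is used as a fallback.
 */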
flow_pick_transfer_proxy_mp(uint16_t port_id)
	struct rte_port *port = &ports[port_id];
	port->flow_transfer_proxy = port_id;
	if (!is_proc_primary())
	ret = rte_flow_pick_transfer_proxy(port_id, &port->flow_transfer_proxy,
		fprintf(stderr, "Error picking flow transfer proxy for port %u: %s - ignore\n",
			port_id, rte_strerror(-ret));
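/*
 * Multi-process wrappers: the device-wide operations below run only in the
 * primary process; in secondary processes they are no-ops.
 */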
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		     const struct rte_eth_conf *dev_conf)
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
eth_dev_start_mp(uint16_t port_id)
	if (is_proc_primary())
		return rte_eth_dev_start(port_id);
eth_dev_stop_mp(uint16_t port_id)
	if (is_proc_primary())
		return rte_eth_dev_stop(port_id);
mempool_free_mp(struct rte_mempool *mp)
	if (is_proc_primary())
		rte_mempool_free(mp);
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);
/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void dev_event_callback(const char *device_name,
			       enum rte_dev_event_type type,
static void fill_xstats_display_info(void);
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
static int all_ports_started(void);
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
 * Helper function to check whether a socket has already been discovered.
 * Return a positive value if the socket is new; return zero if it is
 * already known.
new_socket_id(unsigned int socket_id)
	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
 * Setup default configuration.
set_default_fwd_lcores_config(void)
	unsigned int sock_num;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
			socket_ids[num_sockets++] = sock_num;
		if (i == rte_get_main_lcore())
		fwd_lcores_cpuids[nb_lc++] = i;
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
set_def_peer_eth_addrs(void)
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
set_default_fwd_ports_config(void)
	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;
		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
			socket_ids[num_sockets++] = socket_id;
	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
set_def_fwd_config(void)
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
#ifndef RTE_EXEC_ENV_WINDOWS
/* extremely pessimistic estimation of memory required to create a mempool */
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;
	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	uint64_t hdr_mem = 128 << 20;
	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
	mbuf_mem = n_pages * pgsz;
	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
	*out = (size_t)total_mem;
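/*
 * Rough example of the estimate above: with 2 MB pages and ~2 KB objects,
 * one page holds ~1000 mbufs, so 180k mbufs need ~180 pages (~360 MB) of
 * object memory on top of the fixed 128 MB header allowance.
 */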
pagesz_flags(uint64_t page_sz)
	/* as per the mmap() manpage, huge page sizes are encoded as the log2
	 * of the page size shifted left by MAP_HUGE_SHIFT
	int log2 = rte_log2_u64(page_sz);
	return (log2 << HUGE_SHIFT);
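/*
 * For example, with 2 MB pages: log2(2 MB) = 21, so the computed value is
 * 21 << MAP_HUGE_SHIFT, which matches Linux's MAP_HUGE_2MB.
 */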
alloc_mem(size_t memsz, size_t pgsz, bool huge)
	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
		flags |= HUGE_FLAG | pagesz_flags(pgsz);
	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
struct extmem_param {
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			      RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
		cur_pgsz = pgsizes[pgsz_idx];
		/* if we were told not to allocate hugepages, override */
			cur_pgsz = sysconf(_SC_PAGESIZE);
		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);
		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;
		iovas = malloc(sizeof(*iovas) * n_pages);
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
		/* lock memory if it's not huge pages */
		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);
			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;
			iova = rte_mem_virt2iova(cur);
			iovas[cur_page] = iova;
		/* if we couldn't allocate anything */
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;
	munmap(addr, mem_sz);
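/*
 * Create the "extmem" malloc heap (if needed) and add the externally
 * allocated area, together with its IOVA table, to that heap so regular
 * DPDK allocations can be served from it.
 */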
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
	struct extmem_param param;
	memset(&param, 0, sizeof(param));
	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
			TESTPMD_LOG(ERR, "Cannot create heap\n");
	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);
	/* when using VFIO, memory is automatically mapped for DMA by EAL */
	/* not needed any more */
	free(param.iova_table);
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
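/*
 * Mempool memory-area callbacks: register/unregister each chunk with EAL
 * and DMA-map/unmap it for every probed device (used when the pool is
 * populated from anonymous memory).
 */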
dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;
		ret = eth_dev_info_get_print_err(pid, &dev_info);
			   "unable to get device info for port %d on addr 0x%p, "
			   "mempool unmapping will not be performed\n",
		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
			   "unable to DMA unmap addr 0x%p "
			   memhdr->addr, dev_info.device->name);
	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
		   "unable to un-register addr 0x%p\n", memhdr->addr);
dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
	size_t page_size = sysconf(_SC_PAGESIZE);
	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
		   "unable to register addr 0x%p\n", memhdr->addr);
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;
		ret = eth_dev_info_get_print_err(pid, &dev_info);
			   "unable to get device info for port %d on addr 0x%p, "
			   "mempool mapping will not be performed\n",
		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
			   "unable to DMA map addr 0x%p "
			   memhdr->addr, dev_info.device->name);
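/*
 * Carve the pool's external data buffers out of IOVA-contiguous memzones of
 * EXTBUF_ZONE_SIZE bytes; each zone holds EXTBUF_ZONE_SIZE / elt_size
 * element buffers, and enough zones are reserved to cover nb_mbufs.
 */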
setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
	     char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
	struct rte_pktmbuf_extmem *xmem;
	unsigned int ext_num, zone_num, elt_num;
	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
	elt_num = EXTBUF_ZONE_SIZE / elt_size;
	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
		TESTPMD_LOG(ERR, "Cannot allocate memory for "
			    "external buffer descriptors\n");
	for (ext_num = 0; ext_num < zone_num; ext_num++) {
		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
		const struct rte_memzone *mz;
		char mz_name[RTE_MEMZONE_NAMESIZE];
		ret = snprintf(mz_name, sizeof(mz_name),
			       RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
			errno = ENAMETOOLONG;
		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
						 RTE_MEMZONE_IOVA_CONTIG |
						 RTE_MEMZONE_SIZE_HINT_ONLY,
			 * The caller exits on external buffer creation
			 * error, so there is no need to free memzones.
		xseg->buf_ptr = mz->addr;
		xseg->buf_iova = mz->iova;
		xseg->buf_len = EXTBUF_ZONE_SIZE;
		xseg->elt_size = elt_size;
	if (ext_num == 0 && xmem != NULL) {
 * Configuration initialisation done once at init time.
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id, uint16_t size_idx)
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
#ifndef RTE_EXEC_ENV_WINDOWS
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
	if (!is_proc_primary()) {
		rte_mp = rte_mempool_lookup(pool_name);
			rte_exit(EXIT_FAILURE,
				 "Get mbuf pool for socket %u failed: %s\n",
				 socket_id, rte_strerror(rte_errno));
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		/* wrapper to rte_mempool_create() */
		TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
			    rte_mbuf_best_mempool_ops());
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
#ifndef RTE_EXEC_ENV_WINDOWS
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned int) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, mempool_flags);
		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
	case MP_ALLOC_XMEM_HUGE:
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				    rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size,
			struct rte_pktmbuf_extmem *ext_mem;
			unsigned int ext_num;
			ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
					       socket_id, pool_name, &ext_mem);
				rte_exit(EXIT_FAILURE,
					 "Can't create pinned data buffers\n");
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
				    rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create_extbuf
					(pool_name, nb_mbuf, mb_mempool_cache,
					 0, mbuf_seg_size, socket_id,
		rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
#ifndef RTE_EXEC_ENV_WINDOWS
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			 "Creation of mbuf pool for socket %u failed: %s\n",
			 socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
 * Check whether the given socket ID is valid in NUMA mode.
 * Return 0 if valid; otherwise return -1.
check_socket_id(const unsigned int socket_id)
	static int warning_once = 0;
	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
 * Get the allowed maximum number of RX queues.
 * *pid returns the ID of the port whose max_rx_queues is the smallest
 * among all ports.
get_allowed_max_nb_rxq(portid_t *pid)
	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
	bool max_rxq_valid = false;
	struct rte_eth_dev_info dev_info;
	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
		max_rxq_valid = true;
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
	return max_rxq_valid ? allowed_max_rxq : 0;
 * Check whether the requested number of RX queues is valid.
 * It is valid if it does not exceed the maximum number of RX queues
 * of any port.
 * Return 0 if valid; otherwise return -1.
check_nb_rxq(queueid_t rxq)
	queueid_t allowed_max_rxq;
	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
			rxq, allowed_max_rxq, pid);
 * Get the allowed maximum number of TX queues.
 * *pid returns the ID of the port whose max_tx_queues is the smallest
 * among all ports.
get_allowed_max_nb_txq(portid_t *pid)
	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
	bool max_txq_valid = false;
	struct rte_eth_dev_info dev_info;
	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
		max_txq_valid = true;
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
	return max_txq_valid ? allowed_max_txq : 0;
 * Check whether the requested number of TX queues is valid.
 * It is valid if it does not exceed the maximum number of TX queues
 * of any port.
 * Return 0 if valid; otherwise return -1.
check_nb_txq(queueid_t txq)
	queueid_t allowed_max_txq;
	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
			txq, allowed_max_txq, pid);
 * Get the allowed maximum number of RXDs for every Rx queue.
 * *pid returns the ID of the port whose rx_desc_lim.nb_max is the smallest
 * over all queues of all ports.
get_allowed_max_nb_rxd(portid_t *pid)
	uint16_t allowed_max_rxd = UINT16_MAX;
	struct rte_eth_dev_info dev_info;
	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
	return allowed_max_rxd;
 * Get the allowed minimal number of RXDs for every Rx queue.
 * *pid returns the ID of the port whose rx_desc_lim.nb_min is the largest
 * over all queues of all ports.
get_allowed_min_nb_rxd(portid_t *pid)
	uint16_t allowed_min_rxd = 0;
	struct rte_eth_dev_info dev_info;
	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
	return allowed_min_rxd;
 * Check whether the requested number of RXDs is valid.
 * It is valid if it does not exceed the maximum number of RXDs of any
 * Rx queue and is not less than the minimal number of RXDs of any
 * Rx queue.
 * Return 0 if valid; otherwise return -1.
check_nb_rxd(queueid_t rxd)
	uint16_t allowed_max_rxd;
	uint16_t allowed_min_rxd;
	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
	if (rxd > allowed_max_rxd) {
			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
			rxd, allowed_max_rxd, pid);
	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
	if (rxd < allowed_min_rxd) {
			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
			rxd, allowed_min_rxd, pid);
 * Get the allowed maximum number of TXDs for every Tx queue.
 * *pid returns the ID of the port whose tx_desc_lim.nb_max is the smallest
 * over all queues of all ports.
get_allowed_max_nb_txd(portid_t *pid)
	uint16_t allowed_max_txd = UINT16_MAX;
	struct rte_eth_dev_info dev_info;
	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
	return allowed_max_txd;
 * Get the allowed minimal number of TXDs for every Tx queue.
 * *pid returns the ID of the port whose tx_desc_lim.nb_min is the largest
 * over all queues of all ports.
get_allowed_min_nb_txd(portid_t *pid)
	uint16_t allowed_min_txd = 0;
	struct rte_eth_dev_info dev_info;
	RTE_ETH_FOREACH_DEV(pi) {
		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
	return allowed_min_txd;
 * Check whether the requested number of TXDs is valid.
 * It is valid if it does not exceed the maximum number of TXDs of any
 * Tx queue and is not less than the minimal number of TXDs of any
 * Tx queue.
 * Return 0 if valid; otherwise return -1.
check_nb_txd(queueid_t txd)
	uint16_t allowed_max_txd;
	uint16_t allowed_min_txd;
	allowed_max_txd = get_allowed_max_nb_txd(&pid);
	if (txd > allowed_max_txd) {
			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
			txd, allowed_max_txd, pid);
	allowed_min_txd = get_allowed_min_nb_txd(&pid);
	if (txd < allowed_min_txd) {
			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
			txd, allowed_min_txd, pid);
 * Get the allowed maximum number of hairpin queues.
 * *pid returns the ID of the port whose max_nb_queues is the smallest
 * among all ports.
get_allowed_max_nb_hairpinq(portid_t *pid)
	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
	struct rte_eth_hairpin_cap cap;
	RTE_ETH_FOREACH_DEV(pi) {
		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
		if (cap.max_nb_queues < allowed_max_hairpinq) {
			allowed_max_hairpinq = cap.max_nb_queues;
	return allowed_max_hairpinq;
 * Check whether the requested number of hairpin queues is valid.
 * It is valid if it does not exceed the maximum number of hairpin
 * queues of any port.
 * Return 0 if valid; otherwise return -1.
check_nb_hairpinq(queueid_t hairpinq)
	queueid_t allowed_max_hairpinq;
	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
	if (hairpinq > allowed_max_hairpinq) {
			"Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
			hairpinq, allowed_max_hairpinq, pid);
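/*
 * The Ethernet overhead is (max_rx_pktlen - max_mtu) when the driver reports
 * a meaningful max_mtu, which also covers extra headers such as VLAN tags;
 * otherwise fall back to the L2 header plus CRC.
 */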
get_eth_overhead(struct rte_eth_dev_info *dev_info)
	uint32_t eth_overhead;
	if (dev_info->max_mtu != UINT16_MAX &&
	    dev_info->max_rx_pktlen > dev_info->max_mtu)
		eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	return eth_overhead;
init_config_port_offloads(portid_t pid, uint32_t socket_id)
	struct rte_port *port = &ports[pid];
	eth_rx_metadata_negotiate_mp(pid);
	flow_pick_transfer_proxy_mp(pid);
	port->dev_conf.txmode = tx_mode;
	port->dev_conf.rxmode = rx_mode;
	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
		port->dev_conf.txmode.offloads &=
			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	/* Apply Rx offloads configuration */
	for (i = 0; i < port->dev_info.max_rx_queues; i++)
		port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
	/* Apply Tx offloads configuration */
	for (i = 0; i < port->dev_info.max_tx_queues; i++)
		port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
		port->dev_conf.link_speeds = eth_link_speed;
		port->dev_conf.rxmode.mtu = max_rx_pkt_len -
			get_eth_overhead(&port->dev_info);
	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
	port->tx_metadata = 0;
	 * Check for maximum number of segments per MTU.
	 * Accordingly update the mbuf data size.
	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
		uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
		if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
			uint16_t data_size = (mtu + eth_overhead) /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
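			/*
			 * Worked example: with mtu = 9000, eth_overhead = 26
			 * and nb_mtu_seg_max = 5, data_size = 9026 / 5 = 1805,
			 * so each first-segment mbuf must hold 1805 bytes
			 * plus RTE_PKTMBUF_HEADROOM.
			 */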
			uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
			if (buffer_size > mbuf_data_size[0]) {
				mbuf_data_size[0] = buffer_size;
				TESTPMD_LOG(WARNING,
					    "Configured mbuf size of the first segment %hu\n",
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	struct rte_gro_param gro_param;
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	RTE_ETH_FOREACH_DEV(pid) {
			socket_id = port_numa[pid];
			if (port_numa[pid] == NUMA_NO_CONFIG) {
				socket_id = rte_eth_dev_socket_id(pid);
				 * if socket_id is invalid,
				 * set to the first available socket.
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
			socket_id = (socket_num == UMA_NO_CONFIG) ?
		/* Apply default TxRx configuration for all ports */
		init_config_port_offloads(pid, socket_id);
	 * Create pools of mbufs.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
	 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
	 * nb_txd can be configured at run time.
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
		for (i = 0; i < num_sockets; i++)
			for (j = 0; j < mbuf_data_size_n; j++)
				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
					mbuf_pool_create(mbuf_data_size[j],
		for (i = 0; i < mbuf_data_size_n; i++)
			mempools[i] = mbuf_pool_create
					socket_num == UMA_NO_CONFIG ?
	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	 * Record which mbuf pool each logical core uses, if needed.
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
			mbp = mbuf_pool_find(0, 0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
			fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
				 "rte_gro_ctx_create() failed\n");
reconfig(portid_t new_port_id, unsigned socket_id)
	/* Reconfiguration of Ethernet ports. */
	init_config_port_offloads(new_port_id, socket_id);
init_fwd_streams(void)
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		if (nb_rxq > port->dev_info.max_rx_queues) {
				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
				nb_rxq, port->dev_info.max_rx_queues);
		if (nb_txq > port->dev_info.max_tx_queues) {
				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
				nb_txq, port->dev_info.max_tx_queues);
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
				port->socket_id = rte_eth_dev_socket_id(pid);
				 * if socket_id is invalid,
				 * set to the first available socket.
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
				port->socket_id = socket_num;
	q = RTE_MAX(nb_rxq, nb_txq);
		"Fail: Cannot allocate fwd streams as number of queues is 0\n");
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		rte_free(fwd_streams);
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
					  sizeof(struct fwd_stream *) * nb_fwd_streams,
					  RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
	uint64_t total_burst, sburst;
	uint64_t burst_stats[4];
	uint16_t pktnb_stats[4];
	int burst_percent[4], sburstp;
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];
	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		total_burst += nb_burst;
		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
	if (total_burst == 0)
	printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
			printf("%d%% of other]\n", 100 - sburstp);
		sburst += burst_stats[i];
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
			       100 - sburstp, (int) pktnb_stats[i]);
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
		       burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
fwd_stream_stats_display(streamid_t stream_id)
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";
	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
	printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
	       " TX-dropped: %-14"PRIu64,
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf(" RX- bad IP checksum: %-14"PRIu64
		       " Rx- bad L4 checksum: %-14"PRIu64
		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
		       fs->rx_bad_outer_l4_csum);
		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
		       fs->rx_bad_outer_ip_csum);
	if (record_burst_stats) {
		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
fwd_stats_display(void)
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
		struct fwd_stream *rx_stream;
		struct fwd_stream *tx_stream;
		uint64_t tx_dropped;
		uint64_t rx_bad_ip_csum;
		uint64_t rx_bad_l4_csum;
		uint64_t rx_bad_outer_l4_csum;
		uint64_t rx_bad_outer_ip_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
	uint64_t fwd_cycles = 0;
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;
	memset(ports_stats, 0, sizeof(ports_stats));
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;
		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
			fs->rx_bad_outer_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
			fs->rx_bad_outer_ip_csum;
		if (record_core_cycles)
			fwd_cycles += fs->core_cycles;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf += stats.rx_nombuf;
		printf("\n %s Forward statistics for port %-2d %s\n",
		       fwd_stats_border, pt_id, fwd_stats_border);
		printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
		       stats.ipackets + stats.imissed);
		if (cur_fwd_eng == &csum_fwd_engine) {
			printf(" Bad-ipcsum: %-14"PRIu64
			       " Bad-l4csum: %-14"PRIu64
			       "Bad-outer-l4csum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_ip_csum,
			       ports_stats[pt_id].rx_bad_l4_csum,
			       ports_stats[pt_id].rx_bad_outer_l4_csum);
			printf(" Bad-outer-ipcsum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_outer_ip_csum);
		if (stats.ierrors + stats.rx_nombuf > 0) {
			printf(" RX-error: %-"PRIu64"\n", stats.ierrors);
			printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
		printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats.opackets, ports_stats[pt_id].tx_dropped,
		       stats.opackets + ports_stats[pt_id].tx_dropped);
		if (record_burst_stats) {
			if (ports_stats[pt_id].rx_stream)
				pkt_burst_stats_display("RX",
					&ports_stats[pt_id].rx_stream->rx_burst_stats);
			if (ports_stats[pt_id].tx_stream)
				pkt_burst_stats_display("TX",
					&ports_stats[pt_id].tx_stream->tx_burst_stats);
		printf(" %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);
	printf("\n %s Accumulated forward statistics for all ports"
	       acc_stats_border, acc_stats_border);
	printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
	       acc_stats_border, acc_stats_border);
	if (record_core_cycles) {
#define CYC_PER_MHZ 1E6
		if (total_recv > 0 || total_xmit > 0) {
			uint64_t total_pkts = 0;
			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
				total_pkts = total_xmit;
				total_pkts = total_recv;
			printf("\n CPU cycles/packet=%.2F (total cycles="
			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
			       (double) fwd_cycles / total_pkts,
			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
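/*
 * Snapshot the current hardware stats of each forwarding port as the new
 * baseline and clear all per-stream software counters.
 */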
fwd_stats_reset(void)
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];
		fs->fwd_dropped = 0;
		fs->rx_bad_ip_csum = 0;
		fs->rx_bad_l4_csum = 0;
		fs->rx_bad_outer_l4_csum = 0;
		fs->rx_bad_outer_ip_csum = 0;
		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
		fs->core_cycles = 0;
flush_fwd_rx_queues(void)
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;
	if (num_procs > 1) {
		printf("flushing fwd Rx queues is not supported in multi-process mode, skipping\n");
	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */
	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer is used to exit
				 * the loop after the 1-second period expires.
				prev_tsc = rte_rdtsc();
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
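/*
 * Main forwarding loop of one lcore: invoke the engine's packet_fwd callback
 * on every stream assigned to this lcore until it is told to stop, updating
 * the optional bitrate and latency statistics once per second where enabled.
 */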
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
	struct fwd_stream **fsm;
#ifdef RTE_LIB_BITRATESTATS
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;
	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIB_BITRATESTATS
		if (bitrate_enabled != 0 &&
		    bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
				tics_datum = tics_current;
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0 &&
		    latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
	} while (! fc->stopped);
start_pkt_forward_on_core(void *fwd_arg)
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
run_one_txonly_burst_on_core(void *fwd_arg)
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;
	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
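	/*
	 * Marking the copied context as stopped below makes the forwarding
	 * loop in run_pkt_fwd_on_lcore() run exactly one iteration, i.e.
	 * a single burst.
	 */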
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
 * Launch packet forwarding:
 *   - Set up the per-port forwarding context.
 *   - Launch logical cores with their forwarding configuration.
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
				"launch lcore %u failed - diag=%d\n",
 * Launch packet forwarding configuration.
start_packet_forwarding(int with_tx_first)
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq is 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);
	if (all_ports_started() == 0) {
		fprintf(stderr, "Not all ports were started\n");
	if (test_done == 0) {
		fprintf(stderr, "Packet forwarding already started\n");
	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			if (port_fwd_begin(fwd_ports_ids[i])) {
				"Packet forwarding is not ready\n");
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
				if (port_fwd_begin(fwd_ports_ids[i])) {
					"Packet forwarding is not ready\n");
		flush_fwd_rx_queues();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();
	if (with_tx_first) {
		while (with_tx_first--) {
			launch_packet_forwarding(
				run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
	launch_packet_forwarding(start_pkt_forward_on_core);
stop_packet_forwarding(void)
	port_fwd_end_t port_fwd_end;
		fprintf(stderr, "Packet forwarding not started\n");
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
	fwd_stats_display();
	printf("\nDone.\n");
dev_set_link_up(portid_t pid)
	if (rte_eth_dev_set_link_up(pid) < 0)
		fprintf(stderr, "\nFailed to set link up.\n");
dev_set_link_down(portid_t pid)
	if (rte_eth_dev_set_link_down(pid) < 0)
		fprintf(stderr, "\nFailed to set link down.\n");
all_ports_started(void)
	struct rte_port *port;
	RTE_ETH_FOREACH_DEV(pi) {
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
	/* All ports are started */
port_is_stopped(portid_t port_id)
	struct rte_port *port = &ports[port_id];
	if ((port->port_status != RTE_PORT_STOPPED) &&
	    (port->slave_flag == 0))
all_ports_stopped(void)
	RTE_ETH_FOREACH_DEV(pi) {
		if (!port_is_stopped(pi))
port_is_started(portid_t port_id)
	if (port_id_is_invalid(port_id, ENABLED_WARN))
	if (ports[port_id].port_status != RTE_PORT_STARTED)
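/*
 * hairpin_mode decoding, as used below: the low nibble selects the peer
 * topology (0 means each port hairpins to itself, 0x1 chains the ports into
 * a loop, 0x2 pairs ports), and bit 0x10 requests explicit Tx flow mode.
 */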
/* Configure the Rx and Tx hairpin queues for the selected port. */
static int
setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];
	uint16_t peer_rx_port = pi;
	uint16_t peer_tx_port = pi;
	uint32_t manual = 1;
	uint32_t tx_exp = hairpin_mode & 0x10;
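	/*
	 * Peer selection below follows the testpmd --hairpin-mode option:
	 * with the low nibble clear, each port is hairpinned to itself and
	 * bound automatically by the PMD; bit 0 chains the ports into a
	 * loop (each port's Tx paired with the next port's Rx), bit 1
	 * pairs the ports two by two, and bit 4 (0x10) additionally
	 * requests explicit Tx flow rules.
	 */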
	if (!(hairpin_mode & 0xf)) {
		peer_rx_port = pi;
		peer_tx_port = pi;
		manual = 0;
	} else if (hairpin_mode & 0x1) {
		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
							  RTE_ETH_DEV_NO_OWNER);
		if (peer_tx_port >= RTE_MAX_ETHPORTS)
			peer_tx_port = rte_eth_find_next_owned_by(0,
					RTE_ETH_DEV_NO_OWNER);
		if (p_pi != RTE_MAX_ETHPORTS) {
			peer_rx_port = p_pi;
		} else {
			uint16_t next_pi;

			/* Last port will be the peer RX port of the first. */
			RTE_ETH_FOREACH_DEV(next_pi)
				peer_rx_port = next_pi;
		}
		manual = 1;
	} else if (hairpin_mode & 0x2) {
		if (cnt_pi & 0x1) {
			peer_rx_port = p_pi;
		} else {
			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
					RTE_ETH_DEV_NO_OWNER);
			if (peer_rx_port >= RTE_MAX_ETHPORTS)
				peer_rx_port = pi;
		}
		peer_tx_port = peer_rx_port;
		manual = 1;
	}

	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = peer_rx_port;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up Tx hairpin queue, return */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			fprintf(stderr,
				"Port %d cannot be set back to stopped\n", pi);
		fprintf(stderr, "Failed to configure port %d hairpin queues\n",
			pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = peer_tx_port;
		hairpin_conf.peers[0].queue = i + nb_txq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Failed to set up Rx hairpin queue, return */
		if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
			fprintf(stderr,
				"Port %d cannot be set back to stopped\n", pi);
		fprintf(stderr, "Failed to configure port %d hairpin queues\n",
			pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
/* Configure an Rx queue, with optional buffer split. */
int
rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
	       uint16_t nb_rx_desc, unsigned int socket_id,
	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
{
	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
	unsigned int i, mp_n;
	int ret;

	if (rx_pkt_nb_segs <= 1 ||
	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
		/* Single-segment configuration: no split descriptors. */
		rx_conf->rx_seg = NULL;
		rx_conf->rx_nseg = 0;
		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
					     nb_rx_desc, socket_id,
					     rx_conf, mp);
		return ret;
	}
	for (i = 0; i < rx_pkt_nb_segs; i++) {
		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
		struct rte_mempool *mpx;
		/*
		 * Use the last valid pool for the segments whose index
		 * exceeds the number of configured pools.
		 */
		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
		mpx = mbuf_pool_find(socket_id, mp_n);
		/* Handle zero as mbuf data buffer size. */
		rx_seg->length = rx_pkt_seg_lengths[i] ?
				 rx_pkt_seg_lengths[i] :
				 mbuf_data_size[mp_n];
		rx_seg->offset = i < rx_pkt_nb_offs ?
				 rx_pkt_seg_offsets[i] : 0;
		rx_seg->mp = mpx ? mpx : mp;
	}
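	/*
	 * For example, with rx_pkt_nb_segs = 3 and two configured mbuf
	 * pools, segments 0 and 1 take their pool from entries 0 and 1,
	 * while segment 2 reuses the last pool (index 1); a zero segment
	 * length falls back to that pool's mbuf data size.
	 */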
	rx_conf->rx_nseg = rx_pkt_nb_segs;
	rx_conf->rx_seg = rx_useg;
	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
				     socket_id, rx_conf, NULL);
	rx_conf->rx_seg = NULL;
	rx_conf->rx_nseg = 0;
	return ret;
}
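/*
 * Allocate the per-port arrays that track the subset of extended
 * statistics selected for display: the supported xstat ids plus the
 * previous and current value snapshots used by the periodic display.
 */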
static int
alloc_xstats_display_info(portid_t pi)
{
	uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
	uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
	uint64_t **curr_values = &ports[pi].xstats_info.curr_values;

	if (xstats_display_num == 0)
		return 0;

	*ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
	if (*ids_supp == NULL)
		goto fail_ids_supp;

	*prev_values = calloc(xstats_display_num,
			      sizeof(**prev_values));
	if (*prev_values == NULL)
		goto fail_prev_values;

	*curr_values = calloc(xstats_display_num,
			      sizeof(**curr_values));
	if (*curr_values == NULL)
		goto fail_curr_values;

	ports[pi].xstats_info.allocated = true;

	return 0;

fail_curr_values:
	free(*prev_values);
fail_prev_values:
	free(*ids_supp);
fail_ids_supp:
	return -ENOMEM;
}

static void
free_xstats_display_info(portid_t pi)
{
	if (!ports[pi].xstats_info.allocated)
		return;
	free(ports[pi].xstats_info.ids_supp);
	free(ports[pi].xstats_info.prev_values);
	free(ports[pi].xstats_info.curr_values);
	ports[pi].xstats_info.allocated = false;
}
/** Fill helper structures for the specified port to show extended statistics. */
static void
fill_xstats_display_info_for_port(portid_t pi)
{
	unsigned int stat, stat_supp;
	const char *xstat_name;
	struct rte_port *port;
	uint64_t *ids_supp;
	int rc;

	if (xstats_display_num == 0)
		return;

	if (pi == (portid_t)RTE_PORT_ALL) {
		fill_xstats_display_info();
		return;
	}

	port = &ports[pi];
	if (port->port_status != RTE_PORT_STARTED)
		return;

	if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
		rte_exit(EXIT_FAILURE,
			 "Failed to allocate xstats display memory\n");

	ids_supp = port->xstats_info.ids_supp;
	for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
		xstat_name = xstats_display[stat].name;
		rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
						   ids_supp + stat_supp);
		if (rc != 0) {
			fprintf(stderr,
				"No xstat '%s' on port %u - skipping it (index %u)\n",
				xstat_name, pi, stat);
			continue;
		}
		stat_supp++;
	}

	port->xstats_info.ids_supp_sz = stat_supp;
}

/** Fill helper structures for all ports to show extended statistics. */
static void
fill_xstats_display_info(void)
{
	portid_t pi;

	if (xstats_display_num == 0)
		return;

	RTE_ETH_FOREACH_DEV(pi)
		fill_xstats_display_info_for_port(pi);
}
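/*
 * Start the given port, or all ports when pid is RTE_PORT_ALL:
 * reconfigure the device and its queues if needed, start it, and
 * optionally bind hairpin peers and check link status.
 * Returns 0 on success, -1 on failure.
 */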
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	portid_t p_pi = RTE_MAX_ETHPORTS;
	portid_t pl[RTE_MAX_ETHPORTS];
	portid_t peer_pl[RTE_MAX_ETHPORTS];
	uint16_t cnt_pi = 0;
	uint16_t cfg_pi = 0;
	int peer_pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_eth_hairpin_cap cap;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			fprintf(stderr, "Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			struct rte_eth_conf dev_conf;
			int k;

			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					fprintf(stderr,
						"Failed to apply isolated mode on port %d\n",
						pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
			       port->socket_id);
			if (nb_hairpinq > 0 &&
			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
				fprintf(stderr,
					"Port %d doesn't support hairpin queues\n",
					pi);
				return -1;
			}

			/* configure port */
			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
						    nb_txq + nb_hairpinq,
						    &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					fprintf(stderr,
						"Port %d cannot be set back to stopped\n",
						pi);
				fprintf(stderr, "Failed to configure port %d\n",
					pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}

			/* get device configuration */
			if (eth_dev_conf_get_print_err(pi, &dev_conf) != 0) {
				fprintf(stderr,
					"Port %d cannot get device configuration\n",
					pi);
				return -1;
			}

			/* Apply Rx offloads configuration */
			if (dev_conf.rxmode.offloads !=
			    port->dev_conf.rxmode.offloads) {
				port->dev_conf.rxmode.offloads |=
					dev_conf.rxmode.offloads;
				for (k = 0;
				     k < port->dev_info.max_rx_queues;
				     k++)
					port->rx_conf[k].offloads |=
						dev_conf.rxmode.offloads;
			}
			/* Apply Tx offloads configuration */
			if (dev_conf.txmode.offloads !=
			    port->dev_conf.txmode.offloads) {
				port->dev_conf.txmode.offloads |=
					dev_conf.txmode.offloads;
				for (k = 0;
				     k < port->dev_info.max_tx_queues;
				     k++)
					port->tx_conf[k].offloads |=
						dev_conf.txmode.offloads;
			}
		}
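		/*
		 * Queues are set up only in the primary process; secondary
		 * processes reuse the queues configured by the primary.
		 */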
		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Failed to set up Tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					fprintf(stderr,
						"Port %d cannot be set back to stopped\n",
						pi);
				fprintf(stderr,
					"Failed to configure port %d Tx queues\n",
					pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find
							(rxring_numa[pi], 0);
					if (mp == NULL) {
						fprintf(stderr,
							"Failed to set up Rx queue: no mempool allocated on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find
							(port->socket_id, 0);
					if (mp == NULL) {
						fprintf(stderr,
							"Failed to set up Rx queue: no mempool allocated on socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Failed to set up Rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					fprintf(stderr,
						"Port %d cannot be set back to stopped\n",
						pi);
				fprintf(stderr,
					"Failed to configure port %d Rx queues\n",
					pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup hairpin queues */
			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
				return -1;
		}
		configure_rxtx_dump_callbacks(verbose_level);
		if (clear_ptypes) {
			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
						      NULL, 0);
			if (diag < 0)
				fprintf(stderr,
					"Port %d: Failed to disable Ptype parsing\n",
					pi);
		}

		p_pi = pi;
		cnt_pi++;

		/* start port */
		diag = eth_dev_start_mp(pi);
		if (diag < 0) {
			fprintf(stderr, "Failed to start port %d: %s\n",
				pi, rte_strerror(-diag));

			/* Failed to start port, set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				fprintf(stderr,
					"Port %d cannot be set back to stopped\n",
					pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			fprintf(stderr, "Port %d cannot be set to started\n",
				pi);

		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
			       RTE_ETHER_ADDR_BYTES(&port->eth_addr));

		/* at least one port started, need checking link status */
		need_check_link_status = 1;

		pl[cfg_pi++] = pi;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		fprintf(stderr, "Please stop the ports first\n");
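	/*
	 * In the manual binding modes (low nibble of hairpin_mode set),
	 * the PMD does not bind hairpin queues at start time, so every
	 * started port must be explicitly bound to its peers in both
	 * directions.
	 */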
	if (hairpin_mode & 0xf) {
		uint16_t i;
		int j;

		/* bind all started hairpin ports */
		for (i = 0; i < cfg_pi; i++) {
			pi = pl[i];
			/* bind current Tx to all peer Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 1);
			if (peer_pi < 0)
				return peer_pi;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
				if (diag < 0) {
					fprintf(stderr,
						"Error during binding hairpin Tx port %u to %u: %s\n",
						pi, peer_pl[j],
						rte_strerror(-diag));
					return -1;
				}
			}
			/* bind all peer Tx to current Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 0);
			if (peer_pi < 0)
				return peer_pi;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
				if (diag < 0) {
					fprintf(stderr,
						"Error during binding hairpin Tx port %u to %u: %s\n",
						peer_pl[j], pi,
						rte_strerror(-diag));
					return -1;
				}
			}
		}
	}

	fill_xstats_display_info_for_port(pid);

	printf("Done\n");
	return 0;
}
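/*
 * Stop the given port, or all ports when pid is RTE_PORT_ALL,
 * unbinding hairpin peers and flushing flow rules first.
 */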
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;
	portid_t peer_pl[RTE_MAX_ETHPORTS];
	int peer_pi;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		if (hairpin_mode & 0xf) {
			int j;

			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
			/* unbind all peer Tx from current Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 0);
			if (peer_pi < 0)
				continue;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				rte_eth_hairpin_unbind(peer_pl[j], pi);
			}
		}

		if (port->flow_list)
			port_flow_flush(pi);

		if (eth_dev_stop_mp(pi) != 0)
			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
				pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			fprintf(stderr, "Port %d cannot be set to stopped\n",
				pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
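/*
 * Compact a port id array in place, dropping entries whose port id is
 * no longer valid, and update the element count accordingly.
 */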
static void
remove_invalid_ports_in(portid_t *array, portid_t *total)
{
	portid_t i;
	portid_t new_total = 0;

	for (i = 0; i < *total; i++)
		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
			array[new_total] = array[i];
			new_total++;
		}
	*total = new_total;
}

static void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			fprintf(stderr, "Port %d is already closed\n", pi);
			continue;
		}

		if (is_proc_primary()) {
			port_flow_flush(pi);
			port_flex_item_flush(pi);
			rte_eth_dev_close(pi);
		}

		free_xstats_display_info(pi);
	}

	remove_invalid_ports();
	printf("Done\n");
}
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
	    (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
		fprintf(stderr,
			"Cannot reset port(s), please stop port(s) first.\n");
		return;
	}

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			fprintf(stderr, "Failed to reset port %d. diag=%d\n",
				pi, diag);
		}
	}

	printf("Done\n");
}
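/*
 * Attach a new device by its devargs identifier and set up the
 * resulting port(s), either from the RTE_ETH_EVENT_NEW handler or by
 * iterating over the ports matching the identifier.
 */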
void
attach_port(char *identifier)
{
	portid_t pi;
	struct rte_dev_iterator iterator;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		fprintf(stderr, "Invalid parameters are specified\n");
		return;
	}

	if (rte_dev_probe(identifier) < 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
		return;
	}

	/* first attach mode: event */
	if (setup_on_probe_event) {
		/* new ports are detected on RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
			    ports[pi].need_setup != 0)
				setup_attached_port(pi);
		return;
	}

	/* second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* setup ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
	}
}
static void
setup_attached_port(portid_t pi)
{
	unsigned int socket_id;
	int ret;

	socket_id = (unsigned int)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	ret = rte_eth_promiscuous_enable(pi);
	if (ret != 0)
		fprintf(stderr,
			"Error during enabling promiscuous mode for port %u: %s - ignore\n",
			pi, rte_strerror(-ret));

	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Total ports: %d\n", pi, nb_ports);
	printf("Done\n");
}
static void
detach_device(struct rte_device *dev)
{
	portid_t sibling;

	if (dev == NULL) {
		fprintf(stderr, "Device already removed\n");
		return;
	}

	printf("Removing a device...\n");

	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
				fprintf(stderr, "Port %u not stopped\n",
					sibling);
				return;
			}
			port_flow_flush(sibling);
		}
	}

	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	remove_invalid_ports();

	printf("Device is detached\n");
	printf("Total ports: %d\n", nb_ports);
	printf("Done\n");
}
void
detach_port_device(portid_t port_id)
{
	int ret;
	struct rte_eth_dev_info dev_info;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			fprintf(stderr, "Port not stopped\n");
			return;
		}
		fprintf(stderr, "Port was not closed\n");
	}

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0) {
		TESTPMD_LOG(ERR,
			"Failed to get device info for port %d, not detaching\n",
			port_id);
		return;
	}
	detach_device(dev_info.device);
}
void
detach_devargs(char *identifier)
{
	struct rte_dev_iterator iterator;
	struct rte_devargs da;
	portid_t port_id;

	printf("Removing a device...\n");

	memset(&da, 0, sizeof(da));
	if (rte_devargs_parsef(&da, "%s", identifier)) {
		fprintf(stderr, "Cannot parse identifier\n");
		return;
	}

	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
				fprintf(stderr, "Port %u not stopped\n",
					port_id);
				rte_eth_iterator_cleanup(&iterator);
				rte_devargs_reset(&da);
				return;
			}
			port_flow_flush(port_id);
		}
	}

	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
			    da.name, da.bus->name);
		rte_devargs_reset(&da);
		return;
	}

	remove_invalid_ports();

	printf("Device %s is detached\n", identifier);
	printf("Total ports: %d\n", nb_ports);
	printf("Done\n");
	rte_devargs_reset(&da);
}
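/*
 * Tear down on exit: stop forwarding, stop and close every port,
 * disable hotplug monitoring if it was enabled, and free the mbuf
 * mempools.
 */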
void
pmd_test_exit(void)
{
	portid_t pt_id;
	unsigned int i;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

#ifndef RTE_EXEC_ENV_WINDOWS
	for (i = 0; i < RTE_DIM(mempools); i++) {
		if (mempools[i]) {
			if (mp_alloc_type == MP_ALLOC_ANON)
				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
						     NULL);
		}
	}
#endif
	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to stop device event monitor.");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"failed to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to disable hotplug handling.\n");
			return;
		}
	}
	for (i = 0; i < RTE_DIM(mempools); i++) {
		if (mempools[i])
			mempool_free_mp(mempools[i]);
	}
	free(xstats_display);

	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);

struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};
/* Check the link status of all ports in up to 9 s and print the final status */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					fprintf(stderr,
						"Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status,
					sizeof(link_status), &link);
				printf("Port %d %s\n", portid, link_status);
				continue;
			}
			/* clear all_ports_up flag if any link is down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link statuses, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag once all ports are up or on timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}
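/*
 * Deferred device-removal handler, scheduled with rte_eal_alarm_set()
 * from the removal events: stop forwarding if the port was in use,
 * stop and close the port, then detach the underlying device and
 * restart forwarding if it was interrupted.
 */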
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		TESTPMD_LOG(ERR,
			"Failed to get device info for port %d, not detaching\n",
			port_id);
	else {
		struct rte_device *device = dev_info.device;

		close_port(port_id);
		detach_device(device); /* might be already removed or have more ports */
	}
	if (need_to_start)
		start_packet_forwarding(0);
}
/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		   void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr,
			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
		       eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr,
				"Could not set up deferred device removal\n");
		break;
	case RTE_ETH_EVENT_DESTROY:
		ports[port_id].port_status = RTE_PORT_CLOSED;
		printf("Port %u is closed\n", port_id);
		break;
	default:
		break;
	}
	return 0;
}
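/* Register eth_event_callback for every ethdev event type on all ports. */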
static int
register_eth_event_callback(void)
{
	int ret;
	enum rte_eth_event_type event;

	for (event = RTE_ETH_EVENT_UNKNOWN;
	     event < RTE_ETH_EVENT_MAX; event++) {
		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
				event,
				eth_event_callback,
				NULL);
		if (ret != 0) {
			TESTPMD_LOG(ERR, "Failed to register callback for "
					"%s event\n", eth_event_desc[event]);
			return -1;
		}
	}

	return 0;
}
/* This function is used by the interrupt thread */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
		   __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * Because the user's callback is invoked from the EAL
		 * interrupt callback, and an interrupt callback must return
		 * before it can be unregistered when detaching a device,
		 * finish quickly here and schedule a deferred removal to
		 * detach the device. This is a workaround; once device
		 * detaching is moved into the EAL, the deferred removal can
		 * be deleted.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: After finishing kernel driver binding,
		 * begin to attach the port.
		 */
		break;
	default:
		break;
	}
}
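/*
 * Initialize every Rx and Tx queue configuration of a port from the
 * driver defaults, preserving previously requested offloads and
 * applying any descriptor-threshold overrides given on the command
 * line.
 */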
static void
rxtx_port_config(portid_t pid)
{
	uint16_t qid;
	uint64_t offloads;
	struct rte_port *port = &ports[pid];

	for (qid = 0; qid < nb_rxq; qid++) {
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;

		if (rxq_share > 0 &&
		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
			/* Non-zero share group to enable RxQ share. */
			port->rx_conf[qid].share_group = pid / rxq_share + 1;
			port->rx_conf[qid].share_qid = qid; /* Equal mapping. */
		}

		if (offloads != 0)
			port->rx_conf[qid].offloads = offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		if (offloads != 0)
			port->tx_conf[qid].offloads = offloads;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}
/*
 * Helper function to set the MTU from the frame size.
 *
 * port->dev_info should be set before calling this function.
 *
 * Return 0 on success, negative on error.
 */
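/*
 * The new MTU is simply max_rx_pktlen minus the per-frame Ethernet
 * overhead; for example, assuming an overhead of 18 bytes (14-byte
 * header plus 4-byte FCS), a 1518-byte frame yields the classic
 * 1500-byte MTU.
 */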
int
update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
{
	struct rte_port *port = &ports[portid];
	uint32_t eth_overhead;
	uint16_t mtu, new_mtu;

	eth_overhead = get_eth_overhead(&port->dev_info);

	if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
		printf("Failed to get MTU for port %u\n", portid);
		return -1;
	}

	new_mtu = max_rx_pktlen - eth_overhead;

	if (mtu == new_mtu)
		return 0;

	if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
		fprintf(stderr,
			"Failed to set MTU to %u for port %u\n",
			new_mtu, portid);
		return -1;
	}

	port->dev_conf.rxmode.mtu = new_mtu;

	return 0;
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	int ret, i;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			return;

		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
				port->dev_conf.rxmode.mq_mode =
					(enum rte_eth_rx_mq_mode)
						(rx_mq_mode & ETH_MQ_RX_RSS);
			} else {
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
				port->dev_conf.rxmode.offloads &=
					~DEV_RX_OFFLOAD_RSS_HASH;

				for (i = 0;
				     i < port->dev_info.nb_rx_queues;
				     i++)
					port->rx_conf[i].offloads &=
						~DEV_RX_OFFLOAD_RSS_HASH;
			}
		}

		rxtx_port_config(pid);

		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		if (ret != 0)
			return;

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
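/*
 * Mark or unmark a port as a bonding slave so that global port
 * operations (start/stop/close iterations) skip it and leave its
 * state to the bonding master.
 */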
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;
	struct rte_eth_dev_info dev_info;
	int ret;

	port = &ports[slave_pid];
	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
	if (ret != 0) {
		TESTPMD_LOG(ERR,
			"Failed to get device info for port id %d, "
			"cannot determine if the port is a bonded slave\n",
			slave_pid);
		return 0;
	}

	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) ||
	    (port->slave_flag == 1))
		return 1;
	return 0;
}
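/*
 * VLAN ids used to pre-populate the VMDq pool mapping in DCB+VT mode,
 * one id per pool for up to 32 pools.
 */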
const uint16_t vlan_tags[] = {
	0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Build up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
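/*
 * Reconfigure a port for DCB: build the DCB configuration, reconfigure
 * the device, pre-load the VLAN filter table with the vlan_tags array,
 * and flag the port as DCB-configured.
 */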
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	if (num_procs > 1) {
		printf("The multi-process feature doesn't support dcb.\n");
		return -ENOTSUP;
	}
	rte_port = &ports[pid];

	/* retain the original device configuration. */
	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/* re-configure the device. */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
	if (retval < 0)
		return retval;

	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
	if (retval != 0)
		return retval;

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		fprintf(stderr,
			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
			pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(pid);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
	if (retval != 0)
		return retval;

	rte_port->dcb_flag = 1;

	/* Enter DCB configuration status */
	dcb_config = 1;

	return 0;
}
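/* Allocate and initialize the global array of port structures. */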
static void
init_port(void)
{
	int i;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		ports[i].xstats_info.allocated = false;
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		LIST_INIT(&ports[i].flow_tunnel_list);
	/* Initialize ports NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}
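/*
 * Clear the terminal and display the basic statistics of every
 * forwarded port; invoked periodically from main() when the
 * --stats-period option is set.
 */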
static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);

	fflush(stdout);
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
			signum);
#ifdef RTE_LIB_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
#ifndef RTE_EXEC_ENV_WINDOWS
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
#endif
	}
}
int
main(int argc, char **argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");

#ifdef RTE_LIB_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIB_BITRATESTATS
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIB_LATENCYSTATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);
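	/* Lock all memory to avoid page faults while forwarding. */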
#ifndef RTE_EXEC_ENV_WINDOWS
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			    strerror(errno));
	}
#endif

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE,
			 "--tx-first cannot be used in interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		fprintf(stderr,
			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		fprintf(stderr,
			"Warning: either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		fprintf(stderr,
			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent fully testing it.\n",
			nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to enable hotplug handling.");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to start device event monitoring.");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"failed to register device event callback\n");
			return -1;
		}
	}

	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			fprintf(stderr,
				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
				port_id, rte_strerror(-ret));
	}
	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIB_LATENCYSTATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			fprintf(stderr,
				"Warning: latencystats init() returned error %d\n",
				ret);
		fprintf(stderr, "Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIB_BITRATESTATS
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				 "Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIB_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				rte_delay_us_sleep(US_PER_S);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	ret = rte_eal_cleanup();
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			 "EAL cleanup failed: %s\n", strerror(-ret));

	return EXIT_SUCCESS;
}