1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
12 #ifndef RTE_EXEC_ENV_WINDOWS
15 #include <sys/types.h>
19 #include <sys/queue.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_byteorder.h>
30 #include <rte_debug.h>
31 #include <rte_cycles.h>
32 #include <rte_memory.h>
33 #include <rte_memcpy.h>
34 #include <rte_launch.h>
36 #include <rte_alarm.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_atomic.h>
40 #include <rte_branch_prediction.h>
41 #include <rte_mempool.h>
42 #include <rte_malloc.h>
44 #include <rte_mbuf_pool_ops.h>
45 #include <rte_interrupts.h>
47 #include <rte_ether.h>
48 #include <rte_ethdev.h>
50 #include <rte_string_fns.h>
52 #include <rte_pmd_ixgbe.h>
55 #include <rte_pdump.h>
58 #include <rte_metrics.h>
59 #ifdef RTE_LIB_BITRATESTATS
60 #include <rte_bitrate.h>
62 #ifdef RTE_LIB_LATENCYSTATS
63 #include <rte_latencystats.h>
65 #ifdef RTE_EXEC_ENV_WINDOWS
72 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
73 #define HUGE_FLAG (0x40000)
75 #define HUGE_FLAG MAP_HUGETLB
78 #ifndef MAP_HUGE_SHIFT
79 /* older kernels (or FreeBSD) will not have this define */
80 #define HUGE_SHIFT (26)
82 #define HUGE_SHIFT MAP_HUGE_SHIFT
85 #define EXTMEM_HEAP_NAME "extmem"
86 #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
/* NOTE(review): this file is a line-numbered, sparsely sampled excerpt of
 * testpmd.c — the leading integer on each line is the original file line
 * number, and gaps between numbers mean intermediate lines are missing.
 */
/* Global runtime knobs, settable from the testpmd command line. */
88 uint16_t verbose_level = 0; /**< Silent by default. */
89 int testpmd_logtype; /**< Log type for testpmd logs */
91 /* use main core for command line ? */
92 uint8_t interactive = 0;
93 uint8_t auto_start = 0;
/* Path of a file of commands to run at startup (empty = none). */
95 char cmdline_filename[PATH_MAX] = {0};
98 * NUMA support configuration.
99 * When set, the NUMA support attempts to dispatch the allocation of the
100 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
101 * probed ports among the CPU sockets 0 and 1.
102 * Otherwise, all memory is allocated from CPU socket 0.
104 uint8_t numa_support = 1; /**< numa enabled by default */
107 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
110 uint8_t socket_num = UMA_NO_CONFIG;
113 * Select mempool allocation type:
114 * - native: use regular DPDK memory
115 * - anon: use regular DPDK memory to create mempool, but populate using
116 * anonymous memory (may not be IOVA-contiguous)
117 * - xmem: use externally allocated hugepage memory
119 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
/* Per-port NUMA placement overrides (indexed by port id), filled from
 * --port-numa-config / --ring-numa-config command-line parameters.
 */
122 * Store specified sockets on which memory pool to be used by ports
125 uint8_t port_numa[RTE_MAX_ETHPORTS];
128 * Store specified sockets on which RX ring to be used by ports
131 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
134 * Store specified sockets on which TX ring to be used by ports
137 uint8_t txring_numa[RTE_MAX_ETHPORTS];
140 * Record the Ethernet address of peer target ports to which packets are
142 * Must be instantiated with the ethernet addresses of peer traffic generator
145 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
146 portid_t nb_peer_eth_addrs = 0;
149 * Probed Target Environment.
151 struct rte_port *ports; /**< For all probed ethernet ports. */
152 portid_t nb_ports; /**< Number of probed ethernet ports. */
153 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
154 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
156 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
159 * Test Forwarding Configuration.
160 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
161 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
163 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
164 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
165 portid_t nb_cfg_ports; /**< Number of configured ports. */
166 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
168 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
169 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
171 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
172 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
/* Table of available forwarding engines. NOTE(review): the excerpt shows
 * only a few entries of this NULL-terminated array (lines 178-193 of the
 * original are missing, including the closing brace) — do not assume the
 * full entry list from what is visible here.
 */
175 * Forwarding engines.
177 struct fwd_engine * fwd_engines[] = {
187 &five_tuple_swap_fwd_engine,
188 #ifdef RTE_LIBRTE_IEEE1588
189 &ieee1588_fwd_engine,
194 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
195 uint16_t mempool_flags;
/* Current forwarding configuration and engine; Tx retry tuning. */
197 struct fwd_config cur_fwd_config;
198 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
199 uint32_t retry_enabled;
200 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
201 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
203 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
204 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
205 DEFAULT_MBUF_DATA_SIZE
206 }; /**< Mbuf data space size. */
207 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
208 * specified on command-line. */
209 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
211 /** Extended statistics to show. */
212 struct rte_eth_xstat_name *xstats_display;
214 unsigned int xstats_display_num; /**< Size of extended statistics to show */
/* Rx buffer-split and txonly segment configuration (set via
 * --rxpkts / --txpkts and related options).
 */
217 * In container, it cannot terminate the process which running with 'stats-period'
218 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
223 * Configuration of packet segments used to scatter received packets
224 * if some of split features is configured.
226 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
227 uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
228 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
229 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
232 * Configuration of packet segments used by the "txonly" processing engine.
234 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
235 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
236 TXONLY_DEF_PACKET_LEN,
238 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
240 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
241 /**< Split policy for packets to TX. */
243 uint8_t txonly_multi_flow;
244 /**< Whether multiple flows are generated in TXONLY mode. */
246 uint32_t tx_pkt_times_inter;
247 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
249 uint32_t tx_pkt_times_intra;
250 /**< Timings for send scheduling in TXONLY mode, time between packets. */
252 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
253 uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
254 int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
255 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Queue/descriptor counts and per-ring threshold register overrides.
 * RTE_PMD_PARAM_UNSET (-1) means "defer to the driver's default".
 */
257 /* current configuration is in DCB or not,0 means it is not in DCB mode */
258 uint8_t dcb_config = 0;
261 * Configurable number of RX/TX queues.
263 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
264 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
265 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
268 * Configurable number of RX/TX ring descriptors.
269 * Defaults are supplied by drivers via ethdev.
271 #define RTE_TEST_RX_DESC_DEFAULT 0
272 #define RTE_TEST_TX_DESC_DEFAULT 0
273 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
274 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
276 #define RTE_PMD_PARAM_UNSET -1
278 * Configurable values of RX and TX ring threshold registers.
281 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
282 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
283 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
285 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
286 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
287 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
/* Free/drop/RS thresholds (UNSET = driver default) and the "noisy VNF"
 * forwarding-engine simulation parameters.
 */
290 * Configurable value of RX free threshold.
292 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
295 * Configurable value of RX drop enable.
297 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
300 * Configurable value of TX free threshold.
302 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
305 * Configurable value of TX RS bit threshold.
307 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
310 * Configurable value of buffered packets before sending.
312 uint16_t noisy_tx_sw_bufsz;
315 * Configurable value of packet buffer timeout.
317 uint16_t noisy_tx_sw_buf_flush_time;
320 * Configurable value for size of VNF internal memory area
321 * used for simulating noisy neighbour behaviour
323 uint64_t noisy_lkup_mem_sz;
326 * Configurable value of number of random writes done in
327 * VNF simulation memory area.
329 uint64_t noisy_lkup_num_writes;
332 * Configurable value of number of random reads done in
333 * VNF simulation memory area.
335 uint64_t noisy_lkup_num_reads;
338 * Configurable value of number of random reads/writes done in
339 * VNF simulation memory area.
341 uint64_t noisy_lkup_num_reads_writes;
/* Miscellaneous behaviour flags: RSS default, topology, link-check and
 * hotplug/event notification switches.
 */
344 * Receive Side Scaling (RSS) configuration.
346 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
349 * Port topology configuration
351 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
354 * Avoids to flush all the RX streams before starts forwarding.
356 uint8_t no_flush_rx = 0; /* flush by default */
359 * Flow API isolated mode.
361 uint8_t flow_isolate_all;
364 * Avoids to check link status when starting/stopping a port.
366 uint8_t no_link_check = 0; /* check by default */
369 * Don't automatically start all ports in interactive mode.
371 uint8_t no_device_start = 0;
374 * Enable link status change notification
376 uint8_t lsc_interrupt = 1; /* enabled by default */
379 * Enable device removal notification.
381 uint8_t rmv_interrupt = 1; /* enabled by default */
383 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
385 /* After attach, port setup is called on event or by iterator */
386 bool setup_on_probe_event = true;
388 /* Clear ptypes on port initialization. */
389 uint8_t clear_ptypes = true;
391 /* Hairpin ports configuration mode. */
392 uint16_t hairpin_mode;
/* Human-readable names for ethdev events, indexed by rte_eth_event_type.
 * NOTE(review): the array's closing "};" (original line 408) is missing
 * from this excerpt.
 */
394 /* Pretty printing of ethdev events */
395 static const char * const eth_event_desc[] = {
396 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
397 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
398 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
399 [RTE_ETH_EVENT_INTR_RESET] = "reset",
400 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
401 [RTE_ETH_EVENT_IPSEC] = "IPsec",
402 [RTE_ETH_EVENT_MACSEC] = "MACsec",
403 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
404 [RTE_ETH_EVENT_NEW] = "device probed",
405 [RTE_ETH_EVENT_DESTROY] = "device released",
406 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
407 [RTE_ETH_EVENT_MAX] = NULL,
/* Bitmask of events to print; every bit set except VF_MBOX (and NEW/DESTROY,
 * which are absent from this default mask as visible below).
 */
411 * Display or mask ether events
412 * Default to all events except VF_MBOX
414 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
415 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
416 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
417 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
418 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
419 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
420 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
421 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
/* Default ethdev configuration templates: Rx mode, Tx mode and flow
 * director configuration. NOTE(review): the closing braces of rx_mode,
 * tx_mode and fdir_conf initializers, and several intermediate fields,
 * are missing from this excerpt.
 */
423 * Decide if all memory are locked for performance.
428 * NIC bypass mode configuration options.
431 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
432 /* The NIC bypass watchdog timeout. */
433 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
437 #ifdef RTE_LIB_LATENCYSTATS
440 * Set when latency stats is enabled in the commandline
442 uint8_t latencystats_enabled;
445 * Lcore ID to serive latency statistics.
447 lcoreid_t latencystats_lcore_id = -1;
452 * Ethernet device configuration.
454 struct rte_eth_rxmode rx_mode = {
455 /* Default maximum frame length.
456 * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead"
462 struct rte_eth_txmode tx_mode = {
463 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
466 struct rte_fdir_conf fdir_conf = {
467 .mode = RTE_FDIR_MODE_NONE,
468 .pballoc = RTE_FDIR_PBALLOC_64K,
469 .status = RTE_FDIR_REPORT_STATUS,
471 .vlan_tci_mask = 0xFFEF,
473 .src_ip = 0xFFFFFFFF,
474 .dst_ip = 0xFFFFFFFF,
477 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
478 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
480 .src_port_mask = 0xFFFF,
481 .dst_port_mask = 0xFFFF,
482 .mac_addr_byte_mask = 0xFF,
483 .tunnel_type_mask = 1,
484 .tunnel_id_mask = 0xFFFFFFFF,
/* Forwarding run state, stats display switches, socket bookkeeping, GRO
 * defaults and multi-process identity.
 */
489 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
492 * Display zero values by default for xstats
494 uint8_t xstats_hide_zero;
497 * Measure of CPU cycles disabled by default
499 uint8_t record_core_cycles;
502 * Display of RX and TX bursts disabled by default
504 uint8_t record_burst_stats;
/* Sockets discovered so far; filled by new_socket_id() callers. */
506 unsigned int num_sockets = 0;
507 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
509 #ifdef RTE_LIB_BITRATESTATS
510 /* Bitrate statistics */
511 struct rte_stats_bitrates *bitrate_data;
512 lcoreid_t bitrate_lcore_id;
513 uint8_t bitrate_enabled;
516 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
517 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
520 * hexadecimal bitmask of RX mq mode can be enabled.
522 enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
525 * Used to set forced link speed
527 uint32_t eth_link_speed;
530 * ID of the current process in multi-process, used to
531 * configure the queues to be polled.
536 * Number of processes in multi-process, used to
537 * configure the queues to be polled.
539 unsigned int num_procs = 1;
/* Negotiate delivery of Rx metadata (flow FLAG/MARK, tunnel ID) with the
 * port's PMD. Only the primary process negotiates; secondaries return
 * immediately. On features the PMD declines, a DEBUG log is emitted; an
 * unexpected negotiation error (other than -ENOTSUP) aborts the process.
 * NOTE(review): excerpt is missing braces/early-return lines between the
 * numbered statements.
 */
542 eth_rx_metadata_negotiate_mp(uint16_t port_id)
544 uint64_t rx_meta_features = 0;
547 if (!is_proc_primary())
550 rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
551 rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
552 rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
554 ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
556 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
557 TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
561 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
562 TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
566 if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
567 TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
570 } else if (ret != -ENOTSUP) {
571 rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
572 port_id, rte_strerror(-ret));
/* Record which port acts as the flow "transfer proxy" for port_id.
 * Defaults to the port itself; the primary process then asks the PMD via
 * rte_flow_pick_transfer_proxy(). A failure is reported to stderr and
 * ignored (the self-proxy default stands).
 */
577 flow_pick_transfer_proxy_mp(uint16_t port_id)
579 struct rte_port *port = &ports[port_id];
582 port->flow_transfer_proxy = port_id;
584 if (!is_proc_primary())
587 ret = rte_flow_pick_transfer_proxy(port_id, &port->flow_transfer_proxy,
590 fprintf(stderr, "Error picking flow transfer proxy for port %u: %s - ignore\n",
591 port_id, rte_strerror(-ret));
/*
 * Multi-process wrapper around rte_eth_dev_configure(): only the primary
 * process configures the device; secondary processes are a no-op returning 0.
 */
static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		     const struct rte_eth_conf *dev_conf)
{
	if (is_proc_primary())
		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
					dev_conf);
	return 0;
}
/*
 * Multi-process wrapper around rte_eth_dev_start(): the device is started
 * only by the primary process; secondaries report success without acting.
 */
static int
eth_dev_start_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_start(port_id);

	return 0;
}
/*
 * Multi-process wrapper around rte_eth_dev_stop(): the device is stopped
 * only by the primary process; secondaries report success without acting.
 */
static int
eth_dev_stop_mp(uint16_t port_id)
{
	if (is_proc_primary())
		return rte_eth_dev_stop(port_id);

	return 0;
}
/*
 * Multi-process wrapper around rte_mempool_free(): only the primary
 * process owns and frees the pool; secondaries do nothing.
 */
static void
mempool_free_mp(struct rte_mempool *mp)
{
	if (is_proc_primary())
		rte_mempool_free(mp);
}
/*
 * Multi-process wrapper around rte_eth_dev_set_mtu(): the MTU is changed
 * only by the primary process; secondaries report success without acting.
 */
static int
eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
{
	if (is_proc_primary())
		return rte_eth_dev_set_mtu(port_id, mtu);

	return 0;
}
/* Forward declarations for callbacks and helpers defined later in the
 * file, followed by GSO defaults and the registered mbuf dynamic-flag
 * name table.
 */
639 /* Forward function declarations */
640 static void setup_attached_port(portid_t pi);
641 static void check_all_ports_link_status(uint32_t port_mask);
642 static int eth_event_callback(portid_t port_id,
643 enum rte_eth_event_type type,
644 void *param, void *ret_param);
645 static void dev_event_callback(const char *device_name,
646 enum rte_dev_event_type type,
648 static void fill_xstats_display_info(void);
651 * Check if all the ports are started.
652 * If yes, return positive value. If not, return zero.
654 static int all_ports_started(void);
656 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
657 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
659 /* Holds the registered mbuf dynamic flags names. */
660 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
664 * Helper function to check if socket is already discovered.
665 * If yes, return positive value. If not, return zero.
668 new_socket_id(unsigned int socket_id)
672 for (i = 0; i < num_sockets; i++) {
673 if (socket_ids[i] == socket_id)
/* Build the default forwarding-lcore list: walk every enabled lcore,
 * record newly seen NUMA sockets (aborting if more than
 * RTE_MAX_NUMA_NODES), skip the main lcore, and store the rest in
 * fwd_lcores_cpuids[]. Sets nb_lcores and nb_cfg_lcores.
 * NOTE(review): excerpt is missing the function's braces, `continue`
 * statements and the nb_lc declaration.
 */
680 * Setup default configuration.
683 set_default_fwd_lcores_config(void)
687 unsigned int sock_num;
690 for (i = 0; i < RTE_MAX_LCORE; i++) {
691 if (!rte_lcore_is_enabled(i))
693 sock_num = rte_lcore_to_socket_id(i);
694 if (new_socket_id(sock_num)) {
695 if (num_sockets >= RTE_MAX_NUMA_NODES) {
696 rte_exit(EXIT_FAILURE,
697 "Total sockets greater than %u\n",
700 socket_ids[num_sockets++] = sock_num;
702 if (i == rte_get_main_lcore())
704 fwd_lcores_cpuids[nb_lc++] = i;
706 nb_lcores = (lcoreid_t) nb_lc;
707 nb_cfg_lcores = nb_lcores;
712 set_def_peer_eth_addrs(void)
716 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
717 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
718 peer_eth_addrs[i].addr_bytes[5] = i;
/* Build the default forwarding-port list from all probed ethdev ports,
 * registering each port's NUMA socket if not yet known (aborting past
 * RTE_MAX_NUMA_NODES). Sets nb_cfg_ports and nb_fwd_ports to nb_ports.
 * NOTE(review): excerpt is missing the function braces and the `i`
 * counter declaration.
 */
723 set_default_fwd_ports_config(void)
728 RTE_ETH_FOREACH_DEV(pt_id) {
729 fwd_ports_ids[i++] = pt_id;
731 /* Update sockets info according to the attached device */
732 int socket_id = rte_eth_dev_socket_id(pt_id);
733 if (socket_id >= 0 && new_socket_id(socket_id)) {
734 if (num_sockets >= RTE_MAX_NUMA_NODES) {
735 rte_exit(EXIT_FAILURE,
736 "Total sockets greater than %u\n",
739 socket_ids[num_sockets++] = socket_id;
743 nb_cfg_ports = nb_ports;
744 nb_fwd_ports = nb_ports;
/*
 * Install the complete default forwarding configuration:
 * lcore list, peer MAC addresses, then the port list.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/* Estimate (pessimistically) the bytes of external memory needed for a
 * mempool of nb_mbufs objects of mbuf_sz bytes on pgsz-sized pages:
 * whole pages for the objects plus a flat 128MB for mempool chunk
 * headers/metadata, rounded up to a page. Result stored in *out.
 * NOTE(review): excerpt is missing the `static int` line, braces and the
 * error/success `return` statements.
 */
755 #ifndef RTE_EXEC_ENV_WINDOWS
756 /* extremely pessimistic estimation of memory required to create a mempool */
758 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
760 unsigned int n_pages, mbuf_per_pg, leftover;
761 uint64_t total_mem, mbuf_mem, obj_sz;
763 /* there is no good way to predict how much space the mempool will
764 * occupy because it will allocate chunks on the fly, and some of those
765 * will come from default DPDK memory while some will come from our
766 * external memory, so just assume 128MB will be enough for everyone.
768 uint64_t hdr_mem = 128 << 20;
770 /* account for possible non-contiguousness */
771 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
773 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
777 mbuf_per_pg = pgsz / obj_sz;
778 leftover = (nb_mbufs % mbuf_per_pg) > 0;
779 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
781 mbuf_mem = n_pages * pgsz;
783 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
785 if (total_mem > SIZE_MAX) {
786 TESTPMD_LOG(ERR, "Memory size too big\n");
789 *out = (size_t)total_mem;
795 pagesz_flags(uint64_t page_sz)
797 /* as per mmap() manpage, all page sizes are log2 of page size
798 * shifted by MAP_HUGE_SHIFT
800 int log2 = rte_log2_u64(page_sz);
802 return (log2 << HUGE_SHIFT);
806 alloc_mem(size_t memsz, size_t pgsz, bool huge)
811 /* allocate anonymous hugepages */
812 flags = MAP_ANONYMOUS | MAP_PRIVATE;
814 flags |= HUGE_FLAG | pagesz_flags(pgsz);
816 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
817 if (addr == MAP_FAILED)
/* Result descriptor for create_extmem(): the mapped region plus its
 * per-page IOVA table. NOTE(review): the excerpt omits the region's
 * address/length/page-size fields (original lines 824-826) and the
 * closing brace.
 */
823 struct extmem_param {
827 rte_iova_t *iova_table;
828 unsigned int iova_table_len;
/* Allocate an external memory area big enough for nb_mbufs objects of
 * mbuf_sz bytes. Tries page sizes from pgsizes[] in order (system page
 * size when huge pages are disabled), computes the required size with
 * calc_mem_size(), maps it with alloc_mem(), then builds a per-page IOVA
 * table (touching each page first so it is backed before translation).
 * On success fills *param; on failure unmaps and tries the next page
 * size. NOTE(review): excerpt is missing braces, `continue`/`break`/
 * `return` lines, the mlock call (line ~880) and several declarations.
 */
832 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
835 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
836 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
837 unsigned int cur_page, n_pages, pgsz_idx;
838 size_t mem_sz, cur_pgsz;
839 rte_iova_t *iovas = NULL;
843 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
844 /* skip anything that is too big */
845 if (pgsizes[pgsz_idx] > SIZE_MAX)
848 cur_pgsz = pgsizes[pgsz_idx];
850 /* if we were told not to allocate hugepages, override */
852 cur_pgsz = sysconf(_SC_PAGESIZE);
854 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
856 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
860 /* allocate our memory */
861 addr = alloc_mem(mem_sz, cur_pgsz, huge);
863 /* if we couldn't allocate memory with a specified page size,
864 * that doesn't mean we can't do it with other page sizes, so
870 /* store IOVA addresses for every page in this memory area */
871 n_pages = mem_sz / cur_pgsz;
873 iovas = malloc(sizeof(*iovas) * n_pages);
876 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
879 /* lock memory if it's not huge pages */
883 /* populate IOVA addresses */
884 for (cur_page = 0; cur_page < n_pages; cur_page++) {
889 offset = cur_pgsz * cur_page;
890 cur = RTE_PTR_ADD(addr, offset);
892 /* touch the page before getting its IOVA */
893 *(volatile char *)cur = 0;
895 iova = rte_mem_virt2iova(cur);
897 iovas[cur_page] = iova;
902 /* if we couldn't allocate anything */
908 param->pgsz = cur_pgsz;
909 param->iova_table = iovas;
910 param->iova_table_len = n_pages;
917 munmap(addr, mem_sz);
923 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
925 struct extmem_param param;
928 memset(¶m, 0, sizeof(param));
930 /* check if our heap exists */
931 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
933 /* create our heap */
934 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
936 TESTPMD_LOG(ERR, "Cannot create heap\n");
941 ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge);
943 TESTPMD_LOG(ERR, "Cannot create memory area\n");
947 /* we now have a valid memory area, so add it to heap */
948 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
949 param.addr, param.len, param.iova_table,
950 param.iova_table_len, param.pgsz);
952 /* when using VFIO, memory is automatically mapped for DMA by EAL */
954 /* not needed any more */
955 free(param.iova_table);
958 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
959 munmap(param.addr, param.len);
965 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
/* rte_mempool_mem_iter() callback: for each memory chunk of an
 * anonymous-memory pool, DMA-unmap the chunk from every probed device
 * and un-register it from the external-memory subsystem. Failures are
 * logged and iteration continues (best effort).
 * NOTE(review): excerpt is missing the function braces, log-call
 * prefixes (TESTPMD_LOG lines are split) and `continue`/`return` lines.
 */
971 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
972 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
977 RTE_ETH_FOREACH_DEV(pid) {
978 struct rte_eth_dev_info dev_info;
980 ret = eth_dev_info_get_print_err(pid, &dev_info);
983 "unable to get device info for port %d on addr 0x%p,"
984 "mempool unmapping will not be performed\n",
989 ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
992 "unable to DMA unmap addr 0x%p "
994 memhdr->addr, dev_info.device->name);
997 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
1000 "unable to un-register addr 0x%p\n", memhdr->addr);
/* rte_mempool_mem_iter() callback: register each memory chunk of an
 * anonymous-memory pool with the external-memory subsystem (page-size
 * granularity) and DMA-map it for every probed device so PMDs can use it.
 * Failures are logged and iteration continues (best effort).
 * NOTE(review): excerpt is missing the function braces, the page-size
 * argument line of rte_extmem_register and `continue`/`return` lines.
 */
1005 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
1006 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
1009 size_t page_size = sysconf(_SC_PAGESIZE);
1012 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
1016 "unable to register addr 0x%p\n", memhdr->addr);
1019 RTE_ETH_FOREACH_DEV(pid) {
1020 struct rte_eth_dev_info dev_info;
1022 ret = eth_dev_info_get_print_err(pid, &dev_info);
1025 "unable to get device info for port %d on addr 0x%p,"
1026 "mempool mapping will not be performed\n",
1030 ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
1033 "unable to DMA map addr 0x%p "
1035 memhdr->addr, dev_info.device->name);
/* Build the external-buffer descriptor array for a pinned-extbuf mempool:
 * compute how many EXTBUF_ZONE_SIZE memzones are needed for nb_mbufs
 * elements of mbuf_sz (cache-line aligned), reserve one IOVA-contiguous
 * hugepage memzone per zone, and describe each in an rte_pktmbuf_extmem
 * entry. Returns the zone count via the function result and the array
 * via *ext_mem (presumably — the tail of the function, including the
 * return statement, is missing from this excerpt; confirm against
 * upstream). Caller exits on error, so memzones are deliberately not
 * freed on partial failure.
 */
1042 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
1043 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
1045 struct rte_pktmbuf_extmem *xmem;
1046 unsigned int ext_num, zone_num, elt_num;
1049 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
1050 elt_num = EXTBUF_ZONE_SIZE / elt_size;
1051 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
1053 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
1055 TESTPMD_LOG(ERR, "Cannot allocate memory for "
1056 "external buffer descriptors\n");
1060 for (ext_num = 0; ext_num < zone_num; ext_num++) {
1061 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
1062 const struct rte_memzone *mz;
1063 char mz_name[RTE_MEMZONE_NAMESIZE];
1066 ret = snprintf(mz_name, sizeof(mz_name),
1067 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
1068 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
1069 errno = ENAMETOOLONG;
1073 mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
1075 RTE_MEMZONE_IOVA_CONTIG |
1077 RTE_MEMZONE_SIZE_HINT_ONLY,
1081 * The caller exits on external buffer creation
1082 * error, so there is no need to free memzones.
1088 xseg->buf_ptr = mz->addr;
1089 xseg->buf_iova = mz->iova;
1090 xseg->buf_len = EXTBUF_ZONE_SIZE;
1091 xseg->elt_size = elt_size;
1093 if (ext_num == 0 && xmem != NULL) {
/* Create (primary process) or look up (secondary process) the mbuf pool
 * for a given socket/segment-size slot. The allocation strategy follows
 * mp_alloc_type: native rte_pktmbuf_pool_create(), anonymous memory
 * populated via rte_mempool_populate_anon() + DMA mapping, external
 * (huge)page memory through the "extmem" heap, or pinned external
 * buffers via setup_extbuf(). Any failure is fatal (rte_exit).
 * NOTE(review): excerpt is missing case labels (MP_ALLOC_ANON,
 * MP_ALLOC_XMEM, MP_ALLOC_XBUF), braces, `break`s and the final return
 * of rte_mp.
 */
1102 * Configuration initialisation done once at init time.
1104 static struct rte_mempool *
1105 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
1106 unsigned int socket_id, uint16_t size_idx)
1108 char pool_name[RTE_MEMPOOL_NAMESIZE];
1109 struct rte_mempool *rte_mp = NULL;
1110 #ifndef RTE_EXEC_ENV_WINDOWS
1113 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1115 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1116 if (!is_proc_primary()) {
1117 rte_mp = rte_mempool_lookup(pool_name);
1119 rte_exit(EXIT_FAILURE,
1120 "Get mbuf pool for socket %u failed: %s\n",
1121 socket_id, rte_strerror(rte_errno));
1126 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1127 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1129 switch (mp_alloc_type) {
1130 case MP_ALLOC_NATIVE:
1132 /* wrapper to rte_mempool_create() */
1133 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1134 rte_mbuf_best_mempool_ops());
1135 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1136 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1139 #ifndef RTE_EXEC_ENV_WINDOWS
1142 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1143 mb_size, (unsigned int) mb_mempool_cache,
1144 sizeof(struct rte_pktmbuf_pool_private),
1145 socket_id, mempool_flags);
1149 if (rte_mempool_populate_anon(rte_mp) == 0) {
1150 rte_mempool_free(rte_mp);
1154 rte_pktmbuf_pool_init(rte_mp, NULL);
1155 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1156 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1160 case MP_ALLOC_XMEM_HUGE:
1163 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1165 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1166 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1169 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1170 if (heap_socket < 0)
1171 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1173 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1174 rte_mbuf_best_mempool_ops());
1175 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1176 mb_mempool_cache, 0, mbuf_seg_size,
1183 struct rte_pktmbuf_extmem *ext_mem;
1184 unsigned int ext_num;
1186 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1187 socket_id, pool_name, &ext_mem);
1189 rte_exit(EXIT_FAILURE,
1190 "Can't create pinned data buffers\n");
1192 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1193 rte_mbuf_best_mempool_ops());
1194 rte_mp = rte_pktmbuf_pool_create_extbuf
1195 (pool_name, nb_mbuf, mb_mempool_cache,
1196 0, mbuf_seg_size, socket_id,
1203 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1207 #ifndef RTE_EXEC_ENV_WINDOWS
1210 if (rte_mp == NULL) {
1211 rte_exit(EXIT_FAILURE,
1212 "Creation of mbuf pool for socket %u failed: %s\n",
1213 socket_id, rte_strerror(rte_errno));
1214 } else if (verbose_level > 0) {
1215 rte_mempool_dump(stdout, rte_mp);
/* Validate a user-supplied socket id against the discovered sockets.
 * Emits a one-time NUMA configuration warning when the socket is unknown
 * and NUMA support is on. Per the header comment, returns 0 when valid,
 * -1 otherwise (return statements are missing from this excerpt).
 */
1221 * Check given socket id is valid or not with NUMA mode,
1222 * if valid, return 0, else return -1
1225 check_socket_id(const unsigned int socket_id)
1227 static int warning_once = 0;
1229 if (new_socket_id(socket_id)) {
1230 if (!warning_once && numa_support)
1232 "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
/* Return the smallest max_rx_queues across all probed ports (0 if no
 * port reported a valid value); *pid presumably receives the limiting
 * port's id — the `*pid = pi;` assignment line is missing from this
 * excerpt, confirm against upstream.
 */
1240 * Get the allowed maximum number of RX queues.
1241 * *pid return the port id which has minimal value of
1242 * max_rx_queues in all ports.
1245 get_allowed_max_nb_rxq(portid_t *pid)
1247 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1248 bool max_rxq_valid = false;
1250 struct rte_eth_dev_info dev_info;
1252 RTE_ETH_FOREACH_DEV(pi) {
1253 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1256 max_rxq_valid = true;
1257 if (dev_info.max_rx_queues < allowed_max_rxq) {
1258 allowed_max_rxq = dev_info.max_rx_queues;
1262 return max_rxq_valid ? allowed_max_rxq : 0;
/* Validate a requested per-port Rx queue count against the tightest
 * device limit; logs the offending port on failure. Per the header
 * comment, returns 0 when valid, -1 otherwise (return lines are missing
 * from this excerpt).
 */
1266 * Check input rxq is valid or not.
1267 * If input rxq is not greater than any of maximum number
1268 * of RX queues of all ports, it is valid.
1269 * if valid, return 0, else return -1
1272 check_nb_rxq(queueid_t rxq)
1274 queueid_t allowed_max_rxq;
1277 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1278 if (rxq > allowed_max_rxq) {
1280 "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
1281 rxq, allowed_max_rxq, pid);
/* Tx-side twin of get_allowed_max_nb_rxq(): smallest max_tx_queues
 * across probed ports, 0 when none reported a value; the `*pid = pi;`
 * line is missing from this excerpt.
 */
1288 * Get the allowed maximum number of TX queues.
1289 * *pid return the port id which has minimal value of
1290 * max_tx_queues in all ports.
1293 get_allowed_max_nb_txq(portid_t *pid)
1295 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1296 bool max_txq_valid = false;
1298 struct rte_eth_dev_info dev_info;
1300 RTE_ETH_FOREACH_DEV(pi) {
1301 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1304 max_txq_valid = true;
1305 if (dev_info.max_tx_queues < allowed_max_txq) {
1306 allowed_max_txq = dev_info.max_tx_queues;
1310 return max_txq_valid ? allowed_max_txq : 0;
/* Validate a requested per-port Tx queue count against the tightest
 * device limit; logs the offending port on failure. Per the header
 * comment, returns 0 when valid, -1 otherwise (return lines are missing
 * from this excerpt).
 */
1314 * Check input txq is valid or not.
1315 * If input txq is not greater than any of maximum number
1316 * of TX queues of all ports, it is valid.
1317 * if valid, return 0, else return -1
1320 check_nb_txq(queueid_t txq)
1322 queueid_t allowed_max_txq;
1325 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1326 if (txq > allowed_max_txq) {
1328 "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
1329 txq, allowed_max_txq, pid);
/* Smallest rx_desc_lim.nb_max across probed ports (UINT16_MAX when no
 * port answers); the `*pid = pi;` line is missing from this excerpt.
 */
1336 * Get the allowed maximum number of RXDs of every rx queue.
1337 * *pid return the port id which has minimal value of
1338 * max_rxd in all queues of all ports.
1341 get_allowed_max_nb_rxd(portid_t *pid)
1343 uint16_t allowed_max_rxd = UINT16_MAX;
1345 struct rte_eth_dev_info dev_info;
1347 RTE_ETH_FOREACH_DEV(pi) {
1348 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1351 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1352 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1356 return allowed_max_rxd;
/* Largest rx_desc_lim.nb_min across probed ports (0 when no port
 * answers) — i.e. the strictest lower bound any device imposes; the
 * `*pid = pi;` line is missing from this excerpt.
 */
1360 * Get the allowed minimal number of RXDs of every rx queue.
1361 * *pid return the port id which has minimal value of
1362 * min_rxd in all queues of all ports.
1365 get_allowed_min_nb_rxd(portid_t *pid)
1367 uint16_t allowed_min_rxd = 0;
1369 struct rte_eth_dev_info dev_info;
1371 RTE_ETH_FOREACH_DEV(pi) {
1372 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1375 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1376 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1381 return allowed_min_rxd;
/* Validate a requested Rx descriptor count against both the tightest
 * upper and lower device limits; logs the limiting port on failure.
 * Per the header comment, returns 0 when valid, -1 otherwise (return
 * lines are missing from this excerpt).
 */
1385 * Check input rxd is valid or not.
1386 * If input rxd is not greater than any of maximum number
1387 * of RXDs of every Rx queues and is not less than any of
1388 * minimal number of RXDs of every Rx queues, it is valid.
1389 * if valid, return 0, else return -1
1392 check_nb_rxd(queueid_t rxd)
1394 uint16_t allowed_max_rxd;
1395 uint16_t allowed_min_rxd;
1398 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1399 if (rxd > allowed_max_rxd) {
1401 "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
1402 rxd, allowed_max_rxd, pid);
1406 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1407 if (rxd < allowed_min_rxd) {
1409 "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
1410 rxd, allowed_min_rxd, pid);
/* Smallest tx_desc_lim.nb_max across probed ports (UINT16_MAX when no
 * port answers); the `*pid = pi;` line is missing from this excerpt.
 */
1418 * Get the allowed maximum number of TXDs of every rx queues.
1419 * *pid return the port id which has minimal value of
1420 * max_txd in every tx queue.
1423 get_allowed_max_nb_txd(portid_t *pid)
1425 uint16_t allowed_max_txd = UINT16_MAX;
1427 struct rte_eth_dev_info dev_info;
1429 RTE_ETH_FOREACH_DEV(pi) {
1430 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1433 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1434 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1438 return allowed_max_txd;
1442 * Get the allowed maximum number of TXDs of every tx queues.
1443 * *pid return the port id which has minimal value of
1444 * min_txd in every tx queue.
1447 get_allowed_min_nb_txd(portid_t *pid)
1449 uint16_t allowed_min_txd = 0;
1451 struct rte_eth_dev_info dev_info;
1453 RTE_ETH_FOREACH_DEV(pi) {
1454 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1457 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1458 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1463 return allowed_min_txd;
1467 * Check input txd is valid or not.
1468 * If input txd is not greater than any of maximum number
1469 * of TXDs of every Rx queues, it is valid.
1470 * if valid, return 0, else return -1
1473 check_nb_txd(queueid_t txd)
1475 uint16_t allowed_max_txd;
1476 uint16_t allowed_min_txd;
1479 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1480 if (txd > allowed_max_txd) {
1482 "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
1483 txd, allowed_max_txd, pid);
1487 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1488 if (txd < allowed_min_txd) {
1490 "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
1491 txd, allowed_min_txd, pid);
1499 * Get the allowed maximum number of hairpin queues.
1500 * *pid return the port id which has minimal value of
1501 * max_hairpin_queues in all ports.
1504 get_allowed_max_nb_hairpinq(portid_t *pid)
1506 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1508 struct rte_eth_hairpin_cap cap;
1510 RTE_ETH_FOREACH_DEV(pi) {
1511 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1515 if (cap.max_nb_queues < allowed_max_hairpinq) {
1516 allowed_max_hairpinq = cap.max_nb_queues;
1520 return allowed_max_hairpinq;
1524 * Check input hairpin is valid or not.
1525 * If input hairpin is not greater than any of maximum number
1526 * of hairpin queues of all ports, it is valid.
1527 * if valid, return 0, else return -1
1530 check_nb_hairpinq(queueid_t hairpinq)
1532 queueid_t allowed_max_hairpinq;
1535 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1536 if (hairpinq > allowed_max_hairpinq) {
1538 "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
1539 hairpinq, allowed_max_hairpinq, pid);
1546 init_config_port_offloads(portid_t pid, uint32_t socket_id)
1548 struct rte_port *port = &ports[pid];
1553 eth_rx_metadata_negotiate_mp(pid);
1554 flow_pick_transfer_proxy_mp(pid);
1556 port->dev_conf.txmode = tx_mode;
1557 port->dev_conf.rxmode = rx_mode;
1559 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1561 rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1563 ret = update_jumbo_frame_offload(pid);
1566 "Updating jumbo frame offload failed for port %u\n",
1569 if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1570 port->dev_conf.txmode.offloads &=
1571 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1573 /* Apply Rx offloads configuration */
1574 for (i = 0; i < port->dev_info.max_rx_queues; i++)
1575 port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
1576 /* Apply Tx offloads configuration */
1577 for (i = 0; i < port->dev_info.max_tx_queues; i++)
1578 port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
1581 port->dev_conf.link_speeds = eth_link_speed;
1583 /* set flag to initialize port/queue */
1584 port->need_reconfig = 1;
1585 port->need_reconfig_queues = 1;
1586 port->socket_id = socket_id;
1587 port->tx_metadata = 0;
1590 * Check for maximum number of segments per MTU.
1591 * Accordingly update the mbuf data size.
1593 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1594 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1595 data_size = rx_mode.max_rx_pkt_len /
1596 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1598 if ((data_size + RTE_PKTMBUF_HEADROOM) > mbuf_data_size[0]) {
1599 mbuf_data_size[0] = data_size + RTE_PKTMBUF_HEADROOM;
1600 TESTPMD_LOG(WARNING,
1601 "Configured mbuf size of the first segment %hu\n",
1611 struct rte_mempool *mbp;
1612 unsigned int nb_mbuf_per_pool;
1614 struct rte_gro_param gro_param;
1617 /* Configuration of logical cores. */
1618 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1619 sizeof(struct fwd_lcore *) * nb_lcores,
1620 RTE_CACHE_LINE_SIZE);
1621 if (fwd_lcores == NULL) {
1622 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1623 "failed\n", nb_lcores);
1625 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1626 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1627 sizeof(struct fwd_lcore),
1628 RTE_CACHE_LINE_SIZE);
1629 if (fwd_lcores[lc_id] == NULL) {
1630 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1633 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1636 RTE_ETH_FOREACH_DEV(pid) {
1640 socket_id = port_numa[pid];
1641 if (port_numa[pid] == NUMA_NO_CONFIG) {
1642 socket_id = rte_eth_dev_socket_id(pid);
1645 * if socket_id is invalid,
1646 * set to the first available socket.
1648 if (check_socket_id(socket_id) < 0)
1649 socket_id = socket_ids[0];
1652 socket_id = (socket_num == UMA_NO_CONFIG) ?
1655 /* Apply default TxRx configuration for all ports */
1656 init_config_port_offloads(pid, socket_id);
1659 * Create pools of mbuf.
1660 * If NUMA support is disabled, create a single pool of mbuf in
1661 * socket 0 memory by default.
1662 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1664 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1665 * nb_txd can be configured at run time.
1667 if (param_total_num_mbufs)
1668 nb_mbuf_per_pool = param_total_num_mbufs;
1670 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1671 (nb_lcores * mb_mempool_cache) +
1672 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1673 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1679 for (i = 0; i < num_sockets; i++)
1680 for (j = 0; j < mbuf_data_size_n; j++)
1681 mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1682 mbuf_pool_create(mbuf_data_size[j],
1688 for (i = 0; i < mbuf_data_size_n; i++)
1689 mempools[i] = mbuf_pool_create
1692 socket_num == UMA_NO_CONFIG ?
1698 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1699 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1701 * Records which Mbuf pool to use by each logical core, if needed.
1703 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1704 mbp = mbuf_pool_find(
1705 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1708 mbp = mbuf_pool_find(0, 0);
1709 fwd_lcores[lc_id]->mbp = mbp;
1710 /* initialize GSO context */
1711 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1712 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1713 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1714 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1716 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1721 /* create a gro context for each lcore */
1722 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1723 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1724 gro_param.max_item_per_flow = MAX_PKT_BURST;
1725 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1726 gro_param.socket_id = rte_lcore_to_socket_id(
1727 fwd_lcores_cpuids[lc_id]);
1728 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1729 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1730 rte_exit(EXIT_FAILURE,
1731 "rte_gro_ctx_create() failed\n");
1738 reconfig(portid_t new_port_id, unsigned socket_id)
1740 /* Reconfiguration of Ethernet ports. */
1741 init_config_port_offloads(new_port_id, socket_id);
1747 init_fwd_streams(void)
1750 struct rte_port *port;
1751 streamid_t sm_id, nb_fwd_streams_new;
1754 /* set socket id according to numa or not */
1755 RTE_ETH_FOREACH_DEV(pid) {
1757 if (nb_rxq > port->dev_info.max_rx_queues) {
1759 "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
1760 nb_rxq, port->dev_info.max_rx_queues);
1763 if (nb_txq > port->dev_info.max_tx_queues) {
1765 "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
1766 nb_txq, port->dev_info.max_tx_queues);
1770 if (port_numa[pid] != NUMA_NO_CONFIG)
1771 port->socket_id = port_numa[pid];
1773 port->socket_id = rte_eth_dev_socket_id(pid);
1776 * if socket_id is invalid,
1777 * set to the first available socket.
1779 if (check_socket_id(port->socket_id) < 0)
1780 port->socket_id = socket_ids[0];
1784 if (socket_num == UMA_NO_CONFIG)
1785 port->socket_id = 0;
1787 port->socket_id = socket_num;
1791 q = RTE_MAX(nb_rxq, nb_txq);
1794 "Fail: Cannot allocate fwd streams as number of queues is 0\n");
1797 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1798 if (nb_fwd_streams_new == nb_fwd_streams)
1801 if (fwd_streams != NULL) {
1802 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1803 if (fwd_streams[sm_id] == NULL)
1805 rte_free(fwd_streams[sm_id]);
1806 fwd_streams[sm_id] = NULL;
1808 rte_free(fwd_streams);
1813 nb_fwd_streams = nb_fwd_streams_new;
1814 if (nb_fwd_streams) {
1815 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1816 sizeof(struct fwd_stream *) * nb_fwd_streams,
1817 RTE_CACHE_LINE_SIZE);
1818 if (fwd_streams == NULL)
1819 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1820 " (struct fwd_stream *)) failed\n",
1823 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1824 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1825 " struct fwd_stream", sizeof(struct fwd_stream),
1826 RTE_CACHE_LINE_SIZE);
1827 if (fwd_streams[sm_id] == NULL)
1828 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1829 "(struct fwd_stream) failed\n");
1837 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1839 uint64_t total_burst, sburst;
1841 uint64_t burst_stats[4];
1842 uint16_t pktnb_stats[4];
1844 int burst_percent[4], sburstp;
1848 * First compute the total number of packet bursts and the
1849 * two highest numbers of bursts of the same number of packets.
1851 memset(&burst_stats, 0x0, sizeof(burst_stats));
1852 memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1854 /* Show stats for 0 burst size always */
1855 total_burst = pbs->pkt_burst_spread[0];
1856 burst_stats[0] = pbs->pkt_burst_spread[0];
1859 /* Find the next 2 burst sizes with highest occurrences. */
1860 for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1861 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1866 total_burst += nb_burst;
1868 if (nb_burst > burst_stats[1]) {
1869 burst_stats[2] = burst_stats[1];
1870 pktnb_stats[2] = pktnb_stats[1];
1871 burst_stats[1] = nb_burst;
1872 pktnb_stats[1] = nb_pkt;
1873 } else if (nb_burst > burst_stats[2]) {
1874 burst_stats[2] = nb_burst;
1875 pktnb_stats[2] = nb_pkt;
1878 if (total_burst == 0)
1881 printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1882 for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1884 printf("%d%% of other]\n", 100 - sburstp);
1888 sburst += burst_stats[i];
1889 if (sburst == total_burst) {
1890 printf("%d%% of %d pkts]\n",
1891 100 - sburstp, (int) pktnb_stats[i]);
1896 (double)burst_stats[i] / total_burst * 100;
1897 printf("%d%% of %d pkts + ",
1898 burst_percent[i], (int) pktnb_stats[i]);
1899 sburstp += burst_percent[i];
1904 fwd_stream_stats_display(streamid_t stream_id)
1906 struct fwd_stream *fs;
1907 static const char *fwd_top_stats_border = "-------";
1909 fs = fwd_streams[stream_id];
1910 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1911 (fs->fwd_dropped == 0))
1913 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1914 "TX Port=%2d/Queue=%2d %s\n",
1915 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1916 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1917 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1918 " TX-dropped: %-14"PRIu64,
1919 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1921 /* if checksum mode */
1922 if (cur_fwd_eng == &csum_fwd_engine) {
1923 printf(" RX- bad IP checksum: %-14"PRIu64
1924 " Rx- bad L4 checksum: %-14"PRIu64
1925 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1926 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1927 fs->rx_bad_outer_l4_csum);
1928 printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
1929 fs->rx_bad_outer_ip_csum);
1934 if (record_burst_stats) {
1935 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1936 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Display accumulated forwarding statistics: per-stream stats (when there
 * are more streams than ports), per-port deltas against the snapshot taken
 * by fwd_stats_reset(), and the totals over all forwarding ports.
 * NOTE(review): this listing is garbled - original file line numbers are
 * embedded in the text and several lines (function head, braces, some
 * printf heads) are missing; comments below annotate the visible fragments.
 */
1941 fwd_stats_display(void)
/* Anonymous per-port aggregation struct, indexed by port id. */
1943 static const char *fwd_stats_border = "----------------------";
1944 static const char *acc_stats_border = "+++++++++++++++";
1946 struct fwd_stream *rx_stream;
1947 struct fwd_stream *tx_stream;
1948 uint64_t tx_dropped;
1949 uint64_t rx_bad_ip_csum;
1950 uint64_t rx_bad_l4_csum;
1951 uint64_t rx_bad_outer_l4_csum;
1952 uint64_t rx_bad_outer_ip_csum;
1953 } ports_stats[RTE_MAX_ETHPORTS];
1954 uint64_t total_rx_dropped = 0;
1955 uint64_t total_tx_dropped = 0;
1956 uint64_t total_rx_nombuf = 0;
1957 struct rte_eth_stats stats;
1958 uint64_t fwd_cycles = 0;
1959 uint64_t total_recv = 0;
1960 uint64_t total_xmit = 0;
1961 struct rte_port *port;
1966 memset(ports_stats, 0, sizeof(ports_stats));
/* Fold every forwarding stream into its Rx/Tx port buckets. */
1968 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1969 struct fwd_stream *fs = fwd_streams[sm_id];
1971 if (cur_fwd_config.nb_fwd_streams >
1972 cur_fwd_config.nb_fwd_ports) {
1973 fwd_stream_stats_display(sm_id);
1975 ports_stats[fs->tx_port].tx_stream = fs;
1976 ports_stats[fs->rx_port].rx_stream = fs;
1979 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1981 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1982 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1983 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1984 fs->rx_bad_outer_l4_csum;
1985 ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
1986 fs->rx_bad_outer_ip_csum;
1988 if (record_core_cycles)
1989 fwd_cycles += fs->core_cycles;
/*
 * Per-port section: read the device counters and subtract the snapshot
 * stored in port->stats so only the traffic of this run is reported.
 */
1991 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1992 pt_id = fwd_ports_ids[i];
1993 port = &ports[pt_id];
1995 rte_eth_stats_get(pt_id, &stats);
1996 stats.ipackets -= port->stats.ipackets;
1997 stats.opackets -= port->stats.opackets;
1998 stats.ibytes -= port->stats.ibytes;
1999 stats.obytes -= port->stats.obytes;
2000 stats.imissed -= port->stats.imissed;
2001 stats.oerrors -= port->stats.oerrors;
2002 stats.rx_nombuf -= port->stats.rx_nombuf;
2004 total_recv += stats.ipackets;
2005 total_xmit += stats.opackets;
2006 total_rx_dropped += stats.imissed;
2007 total_tx_dropped += ports_stats[pt_id].tx_dropped;
2008 total_tx_dropped += stats.oerrors;
2009 total_rx_nombuf += stats.rx_nombuf;
2011 printf("\n %s Forward statistics for port %-2d %s\n",
2012 fwd_stats_border, pt_id, fwd_stats_border);
2014 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
2015 "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
2016 stats.ipackets + stats.imissed);
/* Checksum-engine-only counters. */
2018 if (cur_fwd_eng == &csum_fwd_engine) {
2019 printf(" Bad-ipcsum: %-14"PRIu64
2020 " Bad-l4csum: %-14"PRIu64
2021 "Bad-outer-l4csum: %-14"PRIu64"\n",
2022 ports_stats[pt_id].rx_bad_ip_csum,
2023 ports_stats[pt_id].rx_bad_l4_csum,
2024 ports_stats[pt_id].rx_bad_outer_l4_csum);
2025 printf(" Bad-outer-ipcsum: %-14"PRIu64"\n",
2026 ports_stats[pt_id].rx_bad_outer_ip_csum);
2028 if (stats.ierrors + stats.rx_nombuf > 0) {
2029 printf(" RX-error: %-"PRIu64"\n", stats.ierrors);
2030 printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
2033 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
2034 "TX-total: %-"PRIu64"\n",
2035 stats.opackets, ports_stats[pt_id].tx_dropped,
2036 stats.opackets + ports_stats[pt_id].tx_dropped);
2038 if (record_burst_stats) {
2039 if (ports_stats[pt_id].rx_stream)
2040 pkt_burst_stats_display("RX",
2041 &ports_stats[pt_id].rx_stream->rx_burst_stats);
2042 if (ports_stats[pt_id].tx_stream)
2043 pkt_burst_stats_display("TX",
2044 &ports_stats[pt_id].tx_stream->tx_burst_stats);
2047 printf(" %s--------------------------------%s\n",
2048 fwd_stats_border, fwd_stats_border);
/* Accumulated totals over all forwarding ports. */
2051 printf("\n %s Accumulated forward statistics for all ports"
2053 acc_stats_border, acc_stats_border);
2054 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
2056 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
2058 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
2059 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
2060 if (total_rx_nombuf > 0)
2061 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
2062 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
2064 acc_stats_border, acc_stats_border);
/*
 * CPU cycles/packet: for tx-only engines the packet base is what was
 * transmitted, otherwise what was received.
 */
2065 if (record_core_cycles) {
2066 #define CYC_PER_MHZ 1E6
2067 if (total_recv > 0 || total_xmit > 0) {
2068 uint64_t total_pkts = 0;
2069 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
2070 strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
2071 total_pkts = total_xmit;
2073 total_pkts = total_recv;
2075 printf("\n CPU cycles/packet=%.2F (total cycles="
2076 "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
2078 (double) fwd_cycles / total_pkts,
2079 fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
2080 (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
2086 fwd_stats_reset(void)
2092 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2093 pt_id = fwd_ports_ids[i];
2094 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2096 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2097 struct fwd_stream *fs = fwd_streams[sm_id];
2101 fs->fwd_dropped = 0;
2102 fs->rx_bad_ip_csum = 0;
2103 fs->rx_bad_l4_csum = 0;
2104 fs->rx_bad_outer_l4_csum = 0;
2105 fs->rx_bad_outer_ip_csum = 0;
2107 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
2108 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2109 fs->core_cycles = 0;
2114 flush_fwd_rx_queues(void)
2116 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2123 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2124 uint64_t timer_period;
2126 if (num_procs > 1) {
2127 printf("multi-process not support for flushing fwd Rx queues, skip the below lines and return.\n");
2131 /* convert to number of cycles */
2132 timer_period = rte_get_timer_hz(); /* 1 second timeout */
2134 for (j = 0; j < 2; j++) {
2135 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2136 for (rxq = 0; rxq < nb_rxq; rxq++) {
2137 port_id = fwd_ports_ids[rxp];
2139 * testpmd can stuck in the below do while loop
2140 * if rte_eth_rx_burst() always returns nonzero
2141 * packets. So timer is added to exit this loop
2142 * after 1sec timer expiry.
2144 prev_tsc = rte_rdtsc();
2146 nb_rx = rte_eth_rx_burst(port_id, rxq,
2147 pkts_burst, MAX_PKT_BURST);
2148 for (i = 0; i < nb_rx; i++)
2149 rte_pktmbuf_free(pkts_burst[i]);
2151 cur_tsc = rte_rdtsc();
2152 diff_tsc = cur_tsc - prev_tsc;
2153 timer_tsc += diff_tsc;
2154 } while ((nb_rx > 0) &&
2155 (timer_tsc < timer_period));
2159 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
2164 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2166 struct fwd_stream **fsm;
2169 #ifdef RTE_LIB_BITRATESTATS
2170 uint64_t tics_per_1sec;
2171 uint64_t tics_datum;
2172 uint64_t tics_current;
2173 uint16_t i, cnt_ports;
2175 cnt_ports = nb_ports;
2176 tics_datum = rte_rdtsc();
2177 tics_per_1sec = rte_get_timer_hz();
2179 fsm = &fwd_streams[fc->stream_idx];
2180 nb_fs = fc->stream_nb;
2182 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2183 (*pkt_fwd)(fsm[sm_id]);
2184 #ifdef RTE_LIB_BITRATESTATS
2185 if (bitrate_enabled != 0 &&
2186 bitrate_lcore_id == rte_lcore_id()) {
2187 tics_current = rte_rdtsc();
2188 if (tics_current - tics_datum >= tics_per_1sec) {
2189 /* Periodic bitrate calculation */
2190 for (i = 0; i < cnt_ports; i++)
2191 rte_stats_bitrate_calc(bitrate_data,
2193 tics_datum = tics_current;
2197 #ifdef RTE_LIB_LATENCYSTATS
2198 if (latencystats_enabled != 0 &&
2199 latencystats_lcore_id == rte_lcore_id())
2200 rte_latencystats_update();
2203 } while (! fc->stopped);
2207 start_pkt_forward_on_core(void *fwd_arg)
2209 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2210 cur_fwd_config.fwd_eng->packet_fwd);
2215 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2216 * Used to start communication flows in network loopback test configurations.
2219 run_one_txonly_burst_on_core(void *fwd_arg)
2221 struct fwd_lcore *fwd_lc;
2222 struct fwd_lcore tmp_lcore;
2224 fwd_lc = (struct fwd_lcore *) fwd_arg;
2225 tmp_lcore = *fwd_lc;
2226 tmp_lcore.stopped = 1;
2227 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2232 * Launch packet forwarding:
2233 * - Setup per-port forwarding context.
2234 * - launch logical cores with their forwarding configuration.
2237 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2243 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2244 lc_id = fwd_lcores_cpuids[i];
2245 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2246 fwd_lcores[i]->stopped = 0;
2247 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2248 fwd_lcores[i], lc_id);
2251 "launch lcore %u failed - diag=%d\n",
2258 * Launch packet forwarding configuration.
2261 start_packet_forwarding(int with_tx_first)
2263 port_fwd_begin_t port_fwd_begin;
2264 port_fwd_end_t port_fwd_end;
2267 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2268 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
2270 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2271 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
2273 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2274 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2275 (!nb_rxq || !nb_txq))
2276 rte_exit(EXIT_FAILURE,
2277 "Either rxq or txq are 0, cannot use %s fwd mode\n",
2278 cur_fwd_eng->fwd_mode_name);
2280 if (all_ports_started() == 0) {
2281 fprintf(stderr, "Not all ports were started\n");
2284 if (test_done == 0) {
2285 fprintf(stderr, "Packet forwarding already started\n");
2291 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2292 if (port_fwd_begin != NULL) {
2293 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2294 if (port_fwd_begin(fwd_ports_ids[i])) {
2296 "Packet forwarding is not ready\n");
2302 if (with_tx_first) {
2303 port_fwd_begin = tx_only_engine.port_fwd_begin;
2304 if (port_fwd_begin != NULL) {
2305 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2306 if (port_fwd_begin(fwd_ports_ids[i])) {
2308 "Packet forwarding is not ready\n");
2318 flush_fwd_rx_queues();
2320 pkt_fwd_config_display(&cur_fwd_config);
2321 rxtx_config_display();
2324 if (with_tx_first) {
2325 while (with_tx_first--) {
2326 launch_packet_forwarding(
2327 run_one_txonly_burst_on_core);
2328 rte_eal_mp_wait_lcore();
2330 port_fwd_end = tx_only_engine.port_fwd_end;
2331 if (port_fwd_end != NULL) {
2332 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2333 (*port_fwd_end)(fwd_ports_ids[i]);
2336 launch_packet_forwarding(start_pkt_forward_on_core);
2340 stop_packet_forwarding(void)
2342 port_fwd_end_t port_fwd_end;
2348 fprintf(stderr, "Packet forwarding not started\n");
2351 printf("Telling cores to stop...");
2352 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2353 fwd_lcores[lc_id]->stopped = 1;
2354 printf("\nWaiting for lcores to finish...\n");
2355 rte_eal_mp_wait_lcore();
2356 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2357 if (port_fwd_end != NULL) {
2358 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2359 pt_id = fwd_ports_ids[i];
2360 (*port_fwd_end)(pt_id);
2364 fwd_stats_display();
2366 printf("\nDone.\n");
2371 dev_set_link_up(portid_t pid)
2373 if (rte_eth_dev_set_link_up(pid) < 0)
2374 fprintf(stderr, "\nSet link up fail.\n");
2378 dev_set_link_down(portid_t pid)
2380 if (rte_eth_dev_set_link_down(pid) < 0)
2381 fprintf(stderr, "\nSet link down fail.\n");
2385 all_ports_started(void)
2388 struct rte_port *port;
2390 RTE_ETH_FOREACH_DEV(pi) {
2392 /* Check if there is a port which is not started */
2393 if ((port->port_status != RTE_PORT_STARTED) &&
2394 (port->slave_flag == 0))
2398 /* No port is not started */
2403 port_is_stopped(portid_t port_id)
2405 struct rte_port *port = &ports[port_id];
2407 if ((port->port_status != RTE_PORT_STOPPED) &&
2408 (port->slave_flag == 0))
2414 all_ports_stopped(void)
2418 RTE_ETH_FOREACH_DEV(pi) {
2419 if (!port_is_stopped(pi))
2427 port_is_started(portid_t port_id)
2429 if (port_id_is_invalid(port_id, ENABLED_WARN))
2432 if (ports[port_id].port_status != RTE_PORT_STARTED)
2438 /* Configure the Rx and Tx hairpin queues for the selected port. */
/*
 * NOTE(review): this listing is garbled - original file line numbers are
 * embedded and several lines (braces, some statements, the final return)
 * are missing; comments below annotate the visible fragments.
 * hairpin_mode bits (from the visible tests): 0xf selects the peering
 * topology (0x1 = pair ports Tx->Rx in a chain, 0x2 = reverse chain,
 * none = loop to self); 0x10 requests explicit Tx rule mode.
 */
2440 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2443 struct rte_eth_hairpin_conf hairpin_conf = {
2448 struct rte_port *port = &ports[pi];
/* Default: port peers with itself (loopback hairpin). */
2449 uint16_t peer_rx_port = pi;
2450 uint16_t peer_tx_port = pi;
2451 uint32_t manual = 1;
2452 uint32_t tx_exp = hairpin_mode & 0x10;
2454 if (!(hairpin_mode & 0xf)) {
2458 } else if (hairpin_mode & 0x1) {
/* Chain mode: peer Tx port is the next owned port, wrapping to 0. */
2459 peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2460 RTE_ETH_DEV_NO_OWNER);
2461 if (peer_tx_port >= RTE_MAX_ETHPORTS)
2462 peer_tx_port = rte_eth_find_next_owned_by(0,
2463 RTE_ETH_DEV_NO_OWNER);
2464 if (p_pi != RTE_MAX_ETHPORTS) {
2465 peer_rx_port = p_pi;
2469 /* Last port will be the peer RX port of the first. */
2470 RTE_ETH_FOREACH_DEV(next_pi)
2471 peer_rx_port = next_pi;
2474 } else if (hairpin_mode & 0x2) {
2476 peer_rx_port = p_pi;
2478 peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2479 RTE_ETH_DEV_NO_OWNER);
2480 if (peer_rx_port >= RTE_MAX_ETHPORTS)
2483 peer_tx_port = peer_rx_port;
/* Hairpin Tx queues live after the regular Tx queues. */
2487 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2488 hairpin_conf.peers[0].port = peer_rx_port;
2489 hairpin_conf.peers[0].queue = i + nb_rxq;
2490 hairpin_conf.manual_bind = !!manual;
2491 hairpin_conf.tx_explicit = !!tx_exp;
2492 diag = rte_eth_tx_hairpin_queue_setup
2493 (pi, qi, nb_txd, &hairpin_conf);
/* On setup failure: restore STOPPED state and retry next start. */
2498 /* Fail to setup rx queue, return */
2499 if (rte_atomic16_cmpset(&(port->port_status),
2501 RTE_PORT_STOPPED) == 0)
2503 "Port %d can not be set back to stopped\n", pi);
2504 fprintf(stderr, "Fail to configure port %d hairpin queues\n",
2506 /* try to reconfigure queues next time */
2507 port->need_reconfig_queues = 1;
/* Hairpin Rx queues live after the regular Rx queues. */
2510 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2511 hairpin_conf.peers[0].port = peer_tx_port;
2512 hairpin_conf.peers[0].queue = i + nb_txq;
2513 hairpin_conf.manual_bind = !!manual;
2514 hairpin_conf.tx_explicit = !!tx_exp;
2515 diag = rte_eth_rx_hairpin_queue_setup
2516 (pi, qi, nb_rxd, &hairpin_conf);
2521 /* Fail to setup rx queue, return */
2522 if (rte_atomic16_cmpset(&(port->port_status),
2524 RTE_PORT_STOPPED) == 0)
2526 "Port %d can not be set back to stopped\n", pi);
2527 fprintf(stderr, "Fail to configure port %d hairpin queues\n",
2529 /* try to reconfigure queues next time */
2530 port->need_reconfig_queues = 1;
2536 /* Configure the Rx with optional split. */
2538 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2539 uint16_t nb_rx_desc, unsigned int socket_id,
2540 struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2542 union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2543 unsigned int i, mp_n;
2546 if (rx_pkt_nb_segs <= 1 ||
2547 (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2548 rx_conf->rx_seg = NULL;
2549 rx_conf->rx_nseg = 0;
2550 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2551 nb_rx_desc, socket_id,
2555 for (i = 0; i < rx_pkt_nb_segs; i++) {
2556 struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2557 struct rte_mempool *mpx;
2559 * Use last valid pool for the segments with number
2560 * exceeding the pool index.
2562 mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2563 mpx = mbuf_pool_find(socket_id, mp_n);
2564 /* Handle zero as mbuf data buffer size. */
2565 rx_seg->length = rx_pkt_seg_lengths[i] ?
2566 rx_pkt_seg_lengths[i] :
2567 mbuf_data_size[mp_n];
2568 rx_seg->offset = i < rx_pkt_nb_offs ?
2569 rx_pkt_seg_offsets[i] : 0;
2570 rx_seg->mp = mpx ? mpx : mp;
2572 rx_conf->rx_nseg = rx_pkt_nb_segs;
2573 rx_conf->rx_seg = rx_useg;
2574 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2575 socket_id, rx_conf, NULL);
2576 rx_conf->rx_seg = NULL;
2577 rx_conf->rx_nseg = 0;
2582 alloc_xstats_display_info(portid_t pi)
2584 uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
2585 uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
2586 uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
2588 if (xstats_display_num == 0)
2591 *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
2592 if (*ids_supp == NULL)
2595 *prev_values = calloc(xstats_display_num,
2596 sizeof(**prev_values));
2597 if (*prev_values == NULL)
2598 goto fail_prev_values;
2600 *curr_values = calloc(xstats_display_num,
2601 sizeof(**curr_values));
2602 if (*curr_values == NULL)
2603 goto fail_curr_values;
2605 ports[pi].xstats_info.allocated = true;
2618 free_xstats_display_info(portid_t pi)
2620 if (!ports[pi].xstats_info.allocated)
2622 free(ports[pi].xstats_info.ids_supp);
2623 free(ports[pi].xstats_info.prev_values);
2624 free(ports[pi].xstats_info.curr_values);
2625 ports[pi].xstats_info.allocated = false;
2628 /** Fill helper structures for specified port to show extended statistics. */
2630 fill_xstats_display_info_for_port(portid_t pi)
2632 unsigned int stat, stat_supp;
2633 const char *xstat_name;
2634 struct rte_port *port;
2638 if (xstats_display_num == 0)
2641 if (pi == (portid_t)RTE_PORT_ALL) {
2642 fill_xstats_display_info();
2647 if (port->port_status != RTE_PORT_STARTED)
2650 if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
2651 rte_exit(EXIT_FAILURE,
2652 "Failed to allocate xstats display memory\n");
2654 ids_supp = port->xstats_info.ids_supp;
2655 for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
2656 xstat_name = xstats_display[stat].name;
2657 rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
2658 ids_supp + stat_supp);
2660 fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n",
2661 xstat_name, pi, stat);
2667 port->xstats_info.ids_supp_sz = stat_supp;
2670 /** Fill helper structures for all ports to show extended statistics. */
2672 fill_xstats_display_info(void)
2676 if (xstats_display_num == 0)
2679 RTE_ETH_FOREACH_DEV(pi)
2680 fill_xstats_display_info_for_port(pi);
/*
 * Bring up one port (or all ports when pid == RTE_PORT_ALL):
 * reconfigure the device if flagged, (re)create Rx/Tx/hairpin queues,
 * start the device, and finally bind hairpin peers and check link
 * status.  Port state transitions are guarded with
 * rte_atomic16_cmpset() on port->port_status
 * (STOPPED -> HANDLING -> STARTED), so concurrent state changes are
 * detected rather than silently overwritten.
 * NOTE(review): this extract is missing many lines (braces/else arms);
 * comments below describe only what the visible code shows.
 */
2684 start_port(portid_t pid)
2686 int diag, need_check_link_status = -1;
2688 portid_t p_pi = RTE_MAX_ETHPORTS;
2689 portid_t pl[RTE_MAX_ETHPORTS];
2690 portid_t peer_pl[RTE_MAX_ETHPORTS];
2691 uint16_t cnt_pi = 0;
2692 uint16_t cfg_pi = 0;
2695 struct rte_port *port;
2696 struct rte_eth_hairpin_cap cap;
2698 if (port_id_is_invalid(pid, ENABLED_WARN))
2701 RTE_ETH_FOREACH_DEV(pi) {
2702 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2705 need_check_link_status = 0;
/* Claim the port: only a STOPPED port may be started. */
2707 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2708 RTE_PORT_HANDLING) == 0) {
2709 fprintf(stderr, "Port %d is now not stopped\n", pi);
2713 if (port->need_reconfig > 0) {
2714 struct rte_eth_conf dev_conf;
2717 port->need_reconfig = 0;
2719 if (flow_isolate_all) {
2720 int ret = port_flow_isolate(pi, 1);
2723 "Failed to apply isolated mode on port %d\n",
2728 configure_rxtx_dump_callbacks(0);
2729 printf("Configuring Port %d (socket %u)\n", pi,
/* Hairpin queues were requested: make sure the PMD supports them. */
2731 if (nb_hairpinq > 0 &&
2732 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2734 "Port %d doesn't support hairpin queues\n",
2738 /* configure port */
2739 diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
2740 nb_txq + nb_hairpinq,
/* On failure, roll the state machine back to STOPPED and retry later. */
2743 if (rte_atomic16_cmpset(&(port->port_status),
2744 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2746 "Port %d can not be set back to stopped\n",
2748 fprintf(stderr, "Fail to configure port %d\n",
2750 /* try to reconfigure port next time */
2751 port->need_reconfig = 1;
2754 /* get device configuration*/
2756 eth_dev_conf_get_print_err(pi, &dev_conf)) {
2758 "port %d can not get device configuration\n",
2762 /* Apply Rx offloads configuration */
/* Merge offloads the PMD enabled on its own into the cached config
 * and into every per-queue Rx config. */
2763 if (dev_conf.rxmode.offloads !=
2764 port->dev_conf.rxmode.offloads) {
2765 port->dev_conf.rxmode.offloads |=
2766 dev_conf.rxmode.offloads;
2768 k < port->dev_info.max_rx_queues;
2770 port->rx_conf[k].offloads |=
2771 dev_conf.rxmode.offloads;
2773 /* Apply Tx offloads configuration */
2774 if (dev_conf.txmode.offloads !=
2775 port->dev_conf.txmode.offloads) {
2776 port->dev_conf.txmode.offloads |=
2777 dev_conf.txmode.offloads;
2779 k < port->dev_info.max_tx_queues;
2781 port->tx_conf[k].offloads |=
2782 dev_conf.txmode.offloads;
/* Queue setup is only done by the primary process. */
2785 if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2786 port->need_reconfig_queues = 0;
2787 /* setup tx queues */
2788 for (qi = 0; qi < nb_txq; qi++) {
/* Honour the per-port --txring-numa override when set. */
2789 if ((numa_support) &&
2790 (txring_numa[pi] != NUMA_NO_CONFIG))
2791 diag = rte_eth_tx_queue_setup(pi, qi,
2792 port->nb_tx_desc[qi],
2794 &(port->tx_conf[qi]));
2796 diag = rte_eth_tx_queue_setup(pi, qi,
2797 port->nb_tx_desc[qi],
2799 &(port->tx_conf[qi]));
2804 /* Fail to setup tx queue, return */
2805 if (rte_atomic16_cmpset(&(port->port_status),
2807 RTE_PORT_STOPPED) == 0)
2809 "Port %d can not be set back to stopped\n",
2812 "Fail to configure port %d tx queues\n",
2814 /* try to reconfigure queues next time */
2815 port->need_reconfig_queues = 1;
2818 for (qi = 0; qi < nb_rxq; qi++) {
2819 /* setup rx queues */
2820 if ((numa_support) &&
2821 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
/* Rx queues need a mempool on the configured socket. */
2822 struct rte_mempool * mp =
2824 (rxring_numa[pi], 0);
2827 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2832 diag = rx_queue_setup(pi, qi,
2833 port->nb_rx_desc[qi],
2835 &(port->rx_conf[qi]),
2838 struct rte_mempool *mp =
2840 (port->socket_id, 0);
2843 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2847 diag = rx_queue_setup(pi, qi,
2848 port->nb_rx_desc[qi],
2850 &(port->rx_conf[qi]),
2856 /* Fail to setup rx queue, return */
2857 if (rte_atomic16_cmpset(&(port->port_status),
2859 RTE_PORT_STOPPED) == 0)
2861 "Port %d can not be set back to stopped\n",
2864 "Fail to configure port %d rx queues\n",
2866 /* try to reconfigure queues next time */
2867 port->need_reconfig_queues = 1;
2870 /* setup hairpin queues */
2871 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
2874 configure_rxtx_dump_callbacks(verbose_level);
/* Ptype parsing is disabled for performance; failure is non-fatal. */
2876 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2880 "Port %d: Failed to disable Ptype parsing\n",
2888 diag = eth_dev_start_mp(pi);
2890 fprintf(stderr, "Fail to start port %d: %s\n",
2891 pi, rte_strerror(-diag));
/* NOTE(review): stale comment - this is the device-start failure
 * path, not an rx-queue setup failure. */
2893 /* Fail to setup rx queue, return */
2894 if (rte_atomic16_cmpset(&(port->port_status),
2895 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2897 "Port %d can not be set back to stopped\n",
2902 if (rte_atomic16_cmpset(&(port->port_status),
2903 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2904 fprintf(stderr, "Port %d can not be set into started\n",
2907 if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
2908 printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
2909 RTE_ETHER_ADDR_BYTES(&port->eth_addr));
2911 /* at least one port started, need checking link status */
2912 need_check_link_status = 1;
2917 if (need_check_link_status == 1 && !no_link_check)
2918 check_all_ports_link_status(RTE_PORT_ALL);
2919 else if (need_check_link_status == 0)
2920 fprintf(stderr, "Please stop the ports first\n");
/* Low nibble of hairpin_mode selects explicit Tx/Rx peer binding. */
2922 if (hairpin_mode & 0xf) {
2926 /* bind all started hairpin ports */
2927 for (i = 0; i < cfg_pi; i++) {
2929 /* bind current Tx to all peer Rx */
2930 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2931 RTE_MAX_ETHPORTS, 1);
2934 for (j = 0; j < peer_pi; j++) {
2935 if (!port_is_started(peer_pl[j]))
2937 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2940 "Error during binding hairpin Tx port %u to %u: %s\n",
2942 rte_strerror(-diag));
2946 /* bind all peer Tx to current Rx */
2947 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2948 RTE_MAX_ETHPORTS, 0);
2951 for (j = 0; j < peer_pi; j++) {
2952 if (!port_is_started(peer_pl[j]))
2954 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
/* NOTE(review): this is the Rx-direction bind (peer Tx -> our Rx),
 * but the message below says "Tx" - looks like a copy/paste from the
 * loop above; it should read "Rx".  Left unchanged here because the
 * surrounding fprintf call is not fully visible in this extract. */
2957 "Error during binding hairpin Tx port %u to %u: %s\n",
2959 rte_strerror(-diag));
2966 fill_xstats_display_info_for_port(pid);
/*
 * Stop one port (or all ports when pid == RTE_PORT_ALL).  Refuses to
 * stop ports that are still forwarding or are bonding slaves, unbinds
 * hairpin peers, flushes flow rules, stops the device and moves the
 * state machine STARTED -> HANDLING -> STOPPED under
 * rte_atomic16_cmpset().  Lines are elided in this extract.
 */
2973 stop_port(portid_t pid)
2976 struct rte_port *port;
2977 int need_check_link_status = 0;
2978 portid_t peer_pl[RTE_MAX_ETHPORTS];
2981 if (port_id_is_invalid(pid, ENABLED_WARN))
2984 printf("Stopping ports...\n");
2986 RTE_ETH_FOREACH_DEV(pi) {
2987 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
/* A port still in the forwarding config must be removed first. */
2990 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2992 "Please remove port %d from forwarding configuration.\n",
2997 if (port_is_bonding_slave(pi)) {
2999 "Please remove port %d from bonded device.\n",
3005 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
3006 RTE_PORT_HANDLING) == 0)
3009 if (hairpin_mode & 0xf) {
/* Unbind our Tx from every peer, then every peer's Tx from us. */
3012 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
3013 /* unbind all peer Tx from current Rx */
3014 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3015 RTE_MAX_ETHPORTS, 0);
3018 for (j = 0; j < peer_pi; j++) {
3019 if (!port_is_started(peer_pl[j]))
3021 rte_eth_hairpin_unbind(peer_pl[j], pi);
3025 if (port->flow_list)
3026 port_flow_flush(pi);
3028 if (eth_dev_stop_mp(pi) != 0)
3029 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3032 if (rte_atomic16_cmpset(&(port->port_status),
3033 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
3034 fprintf(stderr, "Port %d can not be set into stopped\n",
3036 need_check_link_status = 1;
3038 if (need_check_link_status && !no_link_check)
3039 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Compact @array in place, dropping entries whose port id is no longer
 * valid, and update *total to the new count.  (The increment of
 * new_total and the final *total store are elided in this extract.)
 */
3045 remove_invalid_ports_in(portid_t *array, portid_t *total)
3048 portid_t new_total = 0;
3050 for (i = 0; i < *total; i++)
3051 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
3052 array[new_total] = array[i];
/* Drop detached/invalid ports from both global port lists and keep
 * the configured-port count in sync with the forwarding list. */
3059 remove_invalid_ports(void)
3061 remove_invalid_ports_in(ports_ids, &nb_ports);
3062 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
3063 nb_cfg_ports = nb_fwd_ports;
/*
 * Close one port (or all ports).  Ports that are forwarding or are
 * bonding slaves are skipped with a message; the actual flow flush and
 * rte_eth_dev_close() happen only in the primary process.  The invalid
 * port lists are compacted afterwards.
 */
3067 close_port(portid_t pid)
3070 struct rte_port *port;
3072 if (port_id_is_invalid(pid, ENABLED_WARN))
3075 printf("Closing ports...\n");
3077 RTE_ETH_FOREACH_DEV(pi) {
3078 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3081 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3083 "Please remove port %d from forwarding configuration.\n",
3088 if (port_is_bonding_slave(pi)) {
3090 "Please remove port %d from bonded device.\n",
/* CAS CLOSED -> CLOSED is a pure read: success means already closed. */
3096 if (rte_atomic16_cmpset(&(port->port_status),
3097 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
3098 fprintf(stderr, "Port %d is already closed\n", pi);
3102 if (is_proc_primary()) {
3103 port_flow_flush(pi);
3104 rte_eth_dev_close(pi);
3107 free_xstats_display_info(pi);
3110 remove_invalid_ports();
/*
 * Reset one port (or all ports) via rte_eth_dev_reset().  All targeted
 * ports must be stopped first; on success the port is flagged for full
 * reconfiguration (device and queues) at the next start.
 */
3115 reset_port(portid_t pid)
3119 struct rte_port *port;
3121 if (port_id_is_invalid(pid, ENABLED_WARN))
3124 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
3125 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
3127 "Can not reset port(s), please stop port(s) first.\n");
3131 printf("Resetting ports...\n");
3133 RTE_ETH_FOREACH_DEV(pi) {
3134 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3137 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3139 "Please remove port %d from forwarding configuration.\n",
3144 if (port_is_bonding_slave(pi)) {
3146 "Please remove port %d from bonded device.\n",
3151 diag = rte_eth_dev_reset(pi);
/* A reset invalidates the device config; force reconfig on start. */
3154 port->need_reconfig = 1;
3155 port->need_reconfig_queues = 1;
3157 fprintf(stderr, "Failed to reset port %d. diag=%d\n",
/*
 * Hot-plug a new device described by @identifier (devargs string).
 * After rte_dev_probe(), new ports are set up either from the
 * RTE_ETH_EVENT_NEW event (setup_on_probe_event) or by iterating the
 * devices matching the identifier.
 */
3166 attach_port(char *identifier)
3169 struct rte_dev_iterator iterator;
3171 printf("Attaching a new port...\n");
3173 if (identifier == NULL) {
3174 fprintf(stderr, "Invalid parameters are specified\n");
3178 if (rte_dev_probe(identifier) < 0) {
3179 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3183 /* first attach mode: event */
3184 if (setup_on_probe_event) {
3185 /* new ports are detected on RTE_ETH_EVENT_NEW event */
3186 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
3187 if (ports[pi].port_status == RTE_PORT_HANDLING &&
3188 ports[pi].need_setup != 0)
3189 setup_attached_port(pi);
3193 /* second attach mode: iterator */
3194 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
3195 /* setup ports matching the devargs used for probing */
3196 if (port_is_forwarding(pi))
3197 continue; /* port was already attached before */
3198 setup_attached_port(pi);
/*
 * Finish bringing a freshly attached port into testpmd: pick a valid
 * NUMA socket, rebuild its config, enable promiscuous mode
 * (best-effort), append it to the global and forwarding port lists and
 * mark it STOPPED so it can be started normally.
 */
3203 setup_attached_port(portid_t pi)
3205 unsigned int socket_id;
3208 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
3209 /* if socket_id is invalid, set to the first available socket. */
3210 if (check_socket_id(socket_id) < 0)
3211 socket_id = socket_ids[0];
3212 reconfig(pi, socket_id);
3213 ret = rte_eth_promiscuous_enable(pi);
3216 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
3217 pi, rte_strerror(-ret));
3219 ports_ids[nb_ports++] = pi;
3220 fwd_ports_ids[nb_fwd_ports++] = pi;
3221 nb_cfg_ports = nb_fwd_ports;
3222 ports[pi].need_setup = 0;
3223 ports[pi].port_status = RTE_PORT_STOPPED;
3225 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Detach a whole rte_device: flush flow rules on every sibling port
 * that is not closed (all siblings must be stopped), then remove the
 * device and compact the port lists.
 */
3230 detach_device(struct rte_device *dev)
3235 fprintf(stderr, "Device already removed\n");
3239 printf("Removing a device...\n");
3241 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3242 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3243 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3244 fprintf(stderr, "Port %u not stopped\n",
3248 port_flow_flush(sibling);
3252 if (rte_dev_remove(dev) < 0) {
3253 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3256 remove_invalid_ports();
3258 printf("Device is detached\n");
3259 printf("Now total ports is %d\n", nb_ports);
/*
 * Detach the device that owns @port_id.  The port must already be
 * stopped (closed is also reported); the underlying rte_device handle
 * is looked up through the device info and passed to detach_device().
 */
3265 detach_port_device(portid_t port_id)
3268 struct rte_eth_dev_info dev_info;
3270 if (port_id_is_invalid(port_id, ENABLED_WARN))
3273 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3274 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3275 fprintf(stderr, "Port not stopped\n");
3278 fprintf(stderr, "Port was not closed\n");
3281 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3284 "Failed to get device info for port %d, not detaching\n",
3288 detach_device(dev_info.device);
/*
 * Detach all devices matching a devargs @identifier.  Every matching
 * port must be stopped; flow rules are flushed, then the device is
 * removed through the EAL hotplug API on the parsed bus/name.  The
 * rte_devargs copy is reset on every exit path to avoid leaking its
 * allocations.
 */
3292 detach_devargs(char *identifier)
3294 struct rte_dev_iterator iterator;
3295 struct rte_devargs da;
3298 printf("Removing a device...\n");
3300 memset(&da, 0, sizeof(da));
3301 if (rte_devargs_parsef(&da, "%s", identifier)) {
3302 fprintf(stderr, "cannot parse identifier\n");
3306 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
3307 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3308 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3309 fprintf(stderr, "Port %u not stopped\n",
/* Early return path: stop iterating and release the iterator/devargs. */
3311 rte_eth_iterator_cleanup(&iterator);
3312 rte_devargs_reset(&da);
3315 port_flow_flush(port_id);
3319 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3320 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3321 da.name, da.bus->name);
3322 rte_devargs_reset(&da);
3326 remove_invalid_ports();
3328 printf("Device %s is detached\n", identifier);
3329 printf("Now total ports is %d\n", nb_ports);
3331 rte_devargs_reset(&da);
/* NOTE(review): the enclosing function signature is not visible in
 * this extract; this appears to be the tail of testpmd's exit/cleanup
 * routine - TODO confirm against the full file.  Visible steps: stop
 * forwarding, DMA-unmap anonymous mempools (non-Windows), stop and
 * shut down all ports, tear down device-event monitoring, free the
 * mempools and the xstats display buffer. */
3342 stop_packet_forwarding();
3344 #ifndef RTE_EXEC_ENV_WINDOWS
3345 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
/* Anonymous-memory mempools were DMA-mapped manually; undo that. */
3347 if (mp_alloc_type == MP_ALLOC_ANON)
3348 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3353 if (ports != NULL) {
3355 RTE_ETH_FOREACH_DEV(pt_id) {
3356 printf("\nStopping port %d...\n", pt_id);
3360 RTE_ETH_FOREACH_DEV(pt_id) {
3361 printf("\nShutting down port %d...\n", pt_id);
3368 ret = rte_dev_event_monitor_stop();
3371 "fail to stop device event monitor.");
3375 ret = rte_dev_event_callback_unregister(NULL,
3376 dev_event_callback, NULL);
3379 "fail to unregister device event callback.\n");
3383 ret = rte_dev_hotplug_handle_disable();
3386 "fail to disable hotplug handling.\n");
3390 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3392 mempool_free_mp(mempools[i]);
3394 free(xstats_display);
3396 printf("\nBye...\n");
/* A named, argument-less command that can be dispatched by name. */
3399 typedef void (*cmd_func_t)(void);
3400 struct pmd_test_command {
3401 const char *cmd_name;
3402 cmd_func_t cmd_func;
3405 /* Check the link status of all ports in up to 9s, and print them finally */
3407 check_all_ports_link_status(uint32_t port_mask)
3409 #define CHECK_INTERVAL 100 /* 100ms */
3410 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3412 uint8_t count, all_ports_up, print_flag = 0;
3413 struct rte_eth_link link;
3415 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3417 printf("Checking link statuses...\n");
/* Poll every CHECK_INTERVAL ms; once all links are up (or the timeout
 * hits), do one final pass with print_flag set to report each port. */
3419 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3421 RTE_ETH_FOREACH_DEV(portid) {
3422 if ((port_mask & (1 << portid)) == 0)
3424 memset(&link, 0, sizeof(link));
3425 ret = rte_eth_link_get_nowait(portid, &link);
3428 if (print_flag == 1)
3430 "Port %u link get failed: %s\n",
3431 portid, rte_strerror(-ret));
3434 /* print link status if flag set */
3435 if (print_flag == 1) {
3436 rte_eth_link_to_str(link_status,
3437 sizeof(link_status), &link);
3438 printf("Port %d %s\n", portid, link_status);
3441 /* clear all_ports_up flag if any link down */
3442 if (link.link_status == ETH_LINK_DOWN) {
3447 /* after finally printing all link status, get out */
3448 if (print_flag == 1)
3451 if (all_ports_up == 0) {
3453 rte_delay_ms(CHECK_INTERVAL);
3456 /* set the print_flag if all ports up or timeout */
3457 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred handler (scheduled via rte_eal_alarm_set) for a device
 * removal interrupt on @arg (port id packed as a pointer): stop
 * forwarding if the port was in use, close the port, detach its
 * device, and restart forwarding if it had been stopped here.
 */
3467 rmv_port_callback(void *arg)
3469 int need_to_start = 0;
3470 int org_no_link_check = no_link_check;
3471 portid_t port_id = (intptr_t)arg;
3472 struct rte_eth_dev_info dev_info;
3475 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3477 if (!test_done && port_is_forwarding(port_id)) {
3479 stop_packet_forwarding();
/* Link checks are suppressed around the close; restore the setting. */
3483 no_link_check = org_no_link_check;
3485 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3488 "Failed to get device info for port %d, not detaching\n",
/* Grab the device handle before close_port() invalidates the port. */
3491 struct rte_device *device = dev_info.device;
3492 close_port(port_id);
3493 detach_device(device); /* might be already removed or have more ports */
3496 start_packet_forwarding(0);
3499 /* This function is used by the interrupt thread */
/* Generic ethdev event handler: optionally logs the event (per
 * event_print_mask), then reacts to NEW (flag for deferred setup),
 * INTR_RMV (schedule deferred removal - the handler must not detach
 * from inside the interrupt context) and DESTROY (mark closed). */
3501 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3504 RTE_SET_USED(param);
3505 RTE_SET_USED(ret_param);
3507 if (type >= RTE_ETH_EVENT_MAX) {
3509 "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3510 port_id, __func__, type);
3512 } else if (event_print_mask & (UINT32_C(1) << type)) {
3513 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3514 eth_event_desc[type]);
3519 case RTE_ETH_EVENT_NEW:
3520 ports[port_id].need_setup = 1;
3521 ports[port_id].port_status = RTE_PORT_HANDLING;
3523 case RTE_ETH_EVENT_INTR_RMV:
3524 if (port_id_is_invalid(port_id, DISABLED_WARN))
/* Defer the actual detach by 100ms so it runs outside this callback. */
3526 if (rte_eal_alarm_set(100000,
3527 rmv_port_callback, (void *)(intptr_t)port_id))
3529 "Could not set up deferred device removal\n");
3531 case RTE_ETH_EVENT_DESTROY:
3532 ports[port_id].port_status = RTE_PORT_CLOSED;
3533 printf("Port %u is closed\n", port_id);
/* Register eth_event_callback for every ethdev event type on all
 * ports; logs (and presumably aborts - tail elided) on failure. */
3542 register_eth_event_callback(void)
3545 enum rte_eth_event_type event;
3547 for (event = RTE_ETH_EVENT_UNKNOWN;
3548 event < RTE_ETH_EVENT_MAX; event++) {
3549 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3554 TESTPMD_LOG(ERR, "Failed to register callback for "
3555 "%s event\n", eth_event_desc[event]);
3563 /* This function is used by the interrupt thread */
/* EAL device (bus-level) event handler: on REMOVE, resolve the port
 * from the device name and schedule a deferred detach; ADD is logged
 * but not yet acted on (see TODO below). */
3565 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3566 __rte_unused void *arg)
3571 if (type >= RTE_DEV_EVENT_MAX) {
3572 fprintf(stderr, "%s called upon invalid event %d\n",
3578 case RTE_DEV_EVENT_REMOVE:
3579 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3581 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3583 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
3588 * Because the user's callback is invoked in eal interrupt
3589 * callback, the interrupt callback need to be finished before
3590 * it can be unregistered when detaching device. So finish
3591 * callback soon and use a deferred removal to detach device
3592 * is need. It is a workaround, once the device detaching be
3593 * moved into the eal in the future, the deferred removal could
3596 if (rte_eal_alarm_set(100000,
3597 rmv_port_callback, (void *)(intptr_t)port_id))
3599 "Could not set up deferred device removal\n");
3601 case RTE_DEV_EVENT_ADD:
3602 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3604 /* TODO: After finish kernel driver binding,
3605 * begin to attach port.
/*
 * Seed every Rx/Tx queue config of @port from the PMD defaults
 * (dev_info.default_rxconf/txconf), preserving already-negotiated
 * per-queue offloads, then apply any threshold overrides given on the
 * command line (RTE_PMD_PARAM_UNSET means "not given").  Descriptor
 * counts come from the global nb_rxd/nb_txd.
 */
3614 rxtx_port_config(struct rte_port *port)
3619 for (qid = 0; qid < nb_rxq; qid++) {
/* Keep offloads across the struct-copy of the PMD default config. */
3620 offloads = port->rx_conf[qid].offloads;
3621 port->rx_conf[qid] = port->dev_info.default_rxconf;
3623 port->rx_conf[qid].offloads = offloads;
3625 /* Check if any Rx parameters have been passed */
3626 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3627 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3629 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3630 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3632 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3633 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3635 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3636 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3638 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3639 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3641 port->nb_rx_desc[qid] = nb_rxd;
3644 for (qid = 0; qid < nb_txq; qid++) {
3645 offloads = port->tx_conf[qid].offloads;
3646 port->tx_conf[qid] = port->dev_info.default_txconf;
3648 port->tx_conf[qid].offloads = offloads;
3650 /* Check if any Tx parameters have been passed */
3651 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3652 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3654 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3655 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3657 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3658 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3660 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3661 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3663 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3664 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3666 port->nb_tx_desc[qid] = nb_txd;
3671 * Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload,
3672 * MTU is also aligned if JUMBO_FRAME offload is not set.
3674 * port->dev_info should be set before calling this function.
3676 * return 0 on success, negative on error
3679 update_jumbo_frame_offload(portid_t portid)
3681 struct rte_port *port = &ports[portid];
3682 uint32_t eth_overhead;
3683 uint64_t rx_offloads;
3687 /* Update the max_rx_pkt_len to have MTU as RTE_ETHER_MTU */
/* Derive L2 overhead from the PMD's own max_rx_pktlen/max_mtu pair
 * when available; otherwise fall back to Ethernet header + CRC. */
3688 if (port->dev_info.max_mtu != UINT16_MAX &&
3689 port->dev_info.max_rx_pktlen > port->dev_info.max_mtu)
3690 eth_overhead = port->dev_info.max_rx_pktlen -
3691 port->dev_info.max_mtu;
3693 eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3695 rx_offloads = port->dev_conf.rxmode.offloads;
3697 /* Default config value is 0 to use PMD specific overhead */
3698 if (port->dev_conf.rxmode.max_rx_pkt_len == 0)
3699 port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead;
3701 if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) {
3702 rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
/* Frame exceeds standard MTU: JUMBO_FRAME must be supported. */
3705 if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
3707 "Frame size (%u) is not supported by port %u\n",
3708 port->dev_conf.rxmode.max_rx_pkt_len,
3712 rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
3716 if (rx_offloads != port->dev_conf.rxmode.offloads) {
3719 port->dev_conf.rxmode.offloads = rx_offloads;
3721 /* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
3722 for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
3724 port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
3726 port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3730 /* If JUMBO_FRAME is set MTU conversion done by ethdev layer,
3731 * if unset do it here
3733 if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
3734 ret = eth_dev_set_mtu_mp(portid,
3735 port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead);
3738 "Failed to set MTU to %u for port %u\n",
3739 port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead,
/*
 * Build the initial rte_eth_conf for every probed port: flow-director
 * config, RSS hash functions masked to what the PMD supports, the Rx
 * multi-queue mode (RSS vs. none, unless DCB is active), per-queue
 * configs via rxtx_port_config(), the port MAC address, and LSC/RMV
 * interrupt enables where the device advertises support.
 */
3747 init_port_config(void)
3750 struct rte_port *port;
3753 RTE_ETH_FOREACH_DEV(pid) {
3755 port->dev_conf.fdir_conf = fdir_conf;
3757 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
/* Restrict the requested RSS hash to PMD-supported flow types. */
3762 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3763 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3764 rss_hf & port->dev_info.flow_type_rss_offloads;
3766 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3767 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3770 if (port->dcb_flag == 0) {
3771 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
3772 port->dev_conf.rxmode.mq_mode =
3773 (enum rte_eth_rx_mq_mode)
3774 (rx_mq_mode & ETH_MQ_RX_RSS);
/* No RSS: also drop the RSS_HASH offload port- and queue-wide. */
3776 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3777 port->dev_conf.rxmode.offloads &=
3778 ~DEV_RX_OFFLOAD_RSS_HASH;
3781 i < port->dev_info.nb_rx_queues;
3783 port->rx_conf[i].offloads &=
3784 ~DEV_RX_OFFLOAD_RSS_HASH;
3788 rxtx_port_config(port);
3790 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3794 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3795 rte_pmd_ixgbe_bypass_init(pid);
3798 if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
3799 port->dev_conf.intr_conf.lsc = 1;
3800 if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3801 port->dev_conf.intr_conf.rmv = 1;
/* Mark @slave_pid as a bonding slave in testpmd's port table. */
3805 void set_port_slave_flag(portid_t slave_pid)
3807 struct rte_port *port;
3809 port = &ports[slave_pid];
3810 port->slave_flag = 1;
/* Clear the bonding-slave mark for @slave_pid in the port table. */
3813 void clear_port_slave_flag(portid_t slave_pid)
3815 struct rte_port *port;
3817 port = &ports[slave_pid];
3818 port->slave_flag = 0;
/* Return non-zero when @slave_pid is a bonding slave: either the
 * device advertises RTE_ETH_DEV_BONDED_SLAVE or testpmd flagged it
 * via set_port_slave_flag(). */
3821 uint8_t port_is_bonding_slave(portid_t slave_pid)
3823 struct rte_port *port;
3824 struct rte_eth_dev_info dev_info;
3827 port = &ports[slave_pid];
3828 ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
3831 "Failed to get device info for port id %d,"
3832 "cannot determine if the port is a bonded slave",
3836 if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
/* VLAN IDs 0..31 used to populate the VMDQ+DCB pool map below. */
3841 const uint16_t vlan_tags[] = {
3842 0, 1, 2, 3, 4, 5, 6, 7,
3843 8, 9, 10, 11, 12, 13, 14, 15,
3844 16, 17, 18, 19, 20, 21, 22, 23,
3845 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill @eth_conf for DCB operation.  Two modes: DCB with virtualization
 * (VMDQ+DCB - VLAN-tag-indexed pools, no RSS) or plain DCB (optionally
 * combined with RSS using the device's current hash config).  The
 * priority->TC map simply wraps priorities over num_tcs.  PFC support
 * is advertised in dcb_capability_en when @pfc_en is set.
 */
3849 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3850 enum dcb_mode_enable dcb_mode,
3851 enum rte_eth_nb_tcs num_tcs,
3856 struct rte_eth_rss_conf rss_conf;
3859 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3860 * given above, and the number of traffic classes available for use.
3862 if (dcb_mode == DCB_VT_ENABLED) {
3863 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3864 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3865 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3866 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3868 /* VMDQ+DCB RX and TX configurations */
3869 vmdq_rx_conf->enable_default_pool = 0;
3870 vmdq_rx_conf->default_pool = 0;
/* 4 TCs leave room for 32 pools, 8 TCs for only 16. */
3871 vmdq_rx_conf->nb_queue_pools =
3872 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3873 vmdq_tx_conf->nb_queue_pools =
3874 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3876 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3877 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
/* Steer each VLAN tag to one pool, round-robin over the pools. */
3878 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3879 vmdq_rx_conf->pool_map[i].pools =
3880 1 << (i % vmdq_rx_conf->nb_queue_pools);
3882 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3883 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3884 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3887 /* set DCB mode of RX and TX of multiple queues */
3888 eth_conf->rxmode.mq_mode =
3889 (enum rte_eth_rx_mq_mode)
3890 (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3891 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3893 struct rte_eth_dcb_rx_conf *rx_conf =
3894 &eth_conf->rx_adv_conf.dcb_rx_conf;
3895 struct rte_eth_dcb_tx_conf *tx_conf =
3896 &eth_conf->tx_adv_conf.dcb_tx_conf;
3898 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
/* Reuse the device's current RSS hash config alongside DCB. */
3900 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3904 rx_conf->nb_tcs = num_tcs;
3905 tx_conf->nb_tcs = num_tcs;
3907 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3908 rx_conf->dcb_tc[i] = i % num_tcs;
3909 tx_conf->dcb_tc[i] = i % num_tcs;
3912 eth_conf->rxmode.mq_mode =
3913 (enum rte_eth_rx_mq_mode)
3914 (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3915 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3916 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3920 eth_conf->dcb_capability_en =
3921 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3923 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * Reconfigure port @pid for DCB: build the DCB config with
 * get_eth_dcb_conf(), reconfigure the device, then adjust the global
 * nb_rxq/nb_txq to match the DCB layout, enable VLAN filtering,
 * program the vlan_tags table, and flag the port as DCB-enabled.
 * Not supported with multiple processes.
 */
3929 init_port_dcb_config(portid_t pid,
3930 enum dcb_mode_enable dcb_mode,
3931 enum rte_eth_nb_tcs num_tcs,
3934 struct rte_eth_conf port_conf;
3935 struct rte_port *rte_port;
3939 if (num_procs > 1) {
3940 printf("The multi-process feature doesn't support dcb.\n");
3943 rte_port = &ports[pid];
3945 /* retain the original device configuration. */
3946 memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
3948 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
3949 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3952 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3954 /* re-configure the device . */
/* NOTE(review): nb_rxq is passed for both Rx and Tx queue counts here
 * - presumably intentional for DCB symmetric queue layout, but
 * confirm it is not a typo for nb_txq. */
3955 retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
3959 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3963 /* If dev_info.vmdq_pool_base is greater than 0,
3964 * the queue id of vmdq pools is started after pf queues.
3966 if (dcb_mode == DCB_VT_ENABLED &&
3967 rte_port->dev_info.vmdq_pool_base > 0) {
3969 "VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
3974 /* Assume the ports in testpmd have the same dcb capability
3975 * and has the same number of rxq and txq in dcb mode
3977 if (dcb_mode == DCB_VT_ENABLED) {
3978 if (rte_port->dev_info.max_vfs > 0) {
3979 nb_rxq = rte_port->dev_info.nb_rx_queues;
3980 nb_txq = rte_port->dev_info.nb_tx_queues;
3982 nb_rxq = rte_port->dev_info.max_rx_queues;
3983 nb_txq = rte_port->dev_info.max_tx_queues;
3986 /*if vt is disabled, use all pf queues */
3987 if (rte_port->dev_info.vmdq_pool_base == 0) {
3988 nb_rxq = rte_port->dev_info.max_rx_queues;
3989 nb_txq = rte_port->dev_info.max_tx_queues;
3991 nb_rxq = (queueid_t)num_tcs;
3992 nb_txq = (queueid_t)num_tcs;
3996 rx_free_thresh = 64;
3998 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4000 rxtx_port_config(rte_port);
4002 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
4003 for (i = 0; i < RTE_DIM(vlan_tags); i++)
4004 rx_vft_set(pid, vlan_tags[i], 1);
4006 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4010 rte_port->dcb_flag = 1;
4012 /* Enter DCB configuration status */
/* NOTE(review): enclosing function signature not visible in this
 * extract - presumably init_port(); TODO confirm.  Allocates the
 * global per-port state array and zeroes the NUMA mapping tables. */
4023 /* Configuration of Ethernet ports. */
4024 ports = rte_zmalloc("testpmd: ports",
4025 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4026 RTE_CACHE_LINE_SIZE);
4027 if (ports == NULL) {
4028 rte_exit(EXIT_FAILURE,
4029 "rte_zmalloc(%d struct rte_port) failed\n",
4032 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4033 ports[i].xstats_info.allocated = false;
4034 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4035 LIST_INIT(&ports[i].flow_tunnel_list);
4036 /* Initialize ports NUMA structures */
4037 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS)
4038 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4039 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
/* NOTE(review): enclosing function signature not visible in this
 * extract - appears to be the periodic stats display routine.  Uses
 * ANSI escape sequences (ESC[2J / ESC[1;1H) to clear the terminal
 * before printing per-port statistics. */
4053 const char clr[] = { 27, '[', '2', 'J', '\0' };
4054 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4056 /* Clear screen and move to top left */
4057 printf("%s%s", clr, top_left);
4059 printf("\nPort statistics ====================================");
4060 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4061 nic_stats_display(fwd_ports_ids[i]);
/*
 * SIGINT/SIGTERM handler: tear down optional pdump/latencystats
 * facilities, flag forced termination, then re-raise the signal with
 * the default disposition so the process exits with the conventional
 * signal status (non-Windows only).
 */
4067 signal_handler(int signum)
4069 if (signum == SIGINT || signum == SIGTERM) {
4070 fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4072 #ifdef RTE_LIB_PDUMP
4073 /* uninitialize packet capture framework */
4076 #ifdef RTE_LIB_LATENCYSTATS
4077 if (latencystats_enabled != 0)
4078 rte_latencystats_uninit();
4081 /* Set flag to indicate the force termination. */
4083 /* exit with the expected status */
4084 #ifndef RTE_EXEC_ENV_WINDOWS
4085 signal(signum, SIG_DFL);
4086 kill(getpid(), signum);
/*
 * testpmd entry point: install signal handlers, init EAL and logging,
 * register ethdev event callbacks, enumerate ports, parse command-line
 * args, optionally enable hot-plug monitoring, start all ports, enable
 * promiscuous mode, set up metrics/latency/bitrate stats, then run
 * either the interactive command line or non-interactive forwarding
 * (with an optional periodic stats loop) until exit.
 */
4092 main(int argc, char** argv)
4099 signal(SIGINT, signal_handler);
4100 signal(SIGTERM, signal_handler);
4102 testpmd_logtype = rte_log_register("testpmd");
4103 if (testpmd_logtype < 0)
4104 rte_exit(EXIT_FAILURE, "Cannot register log type");
4105 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4107 diag = rte_eal_init(argc, argv);
4109 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
4110 rte_strerror(rte_errno));
4112 ret = register_eth_event_callback();
4114 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
4116 #ifdef RTE_LIB_PDUMP
4117 /* initialize packet capture framework */
/* Record the ids of all ports probed during EAL init. */
4122 RTE_ETH_FOREACH_DEV(port_id) {
4123 ports_ids[count] = port_id;
4126 nb_ports = (portid_t) count;
4128 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
4130 /* allocate port structures, and init them */
4133 set_def_fwd_config();
4135 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
4136 "Check the core mask argument\n");
4138 /* Bitrate/latency stats disabled by default */
4139 #ifdef RTE_LIB_BITRATESTATS
4140 bitrate_enabled = 0;
4142 #ifdef RTE_LIB_LATENCYSTATS
4143 latencystats_enabled = 0;
4146 /* on FreeBSD, mlockall() is disabled by default */
4147 #ifdef RTE_EXEC_ENV_FREEBSD
4156 launch_args_parse(argc, argv);
4158 #ifndef RTE_EXEC_ENV_WINDOWS
/* Pin pages to avoid page-fault latency during forwarding. */
4159 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4160 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
4165 if (tx_first && interactive)
4166 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
4167 "interactive mode.\n");
4169 if (tx_first && lsc_interrupt) {
4171 "Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
4175 if (!nb_rxq && !nb_txq)
4177 "Warning: Either rx or tx queues should be non-zero\n");
4179 if (nb_rxq > 1 && nb_rxq > nb_txq)
4181 "Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
4187 ret = rte_dev_hotplug_handle_enable();
4190 "fail to enable hotplug handling.");
4194 ret = rte_dev_event_monitor_start();
4197 "fail to start device event monitoring.");
4201 ret = rte_dev_event_callback_register(NULL,
4202 dev_event_callback, NULL);
4205 "fail to register device event callback\n");
4210 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
4211 rte_exit(EXIT_FAILURE, "Start ports failed\n");
4213 /* set all ports to promiscuous mode by default */
4214 RTE_ETH_FOREACH_DEV(port_id) {
4215 ret = rte_eth_promiscuous_enable(port_id);
4218 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
4219 port_id, rte_strerror(-ret));
4222 /* Init metrics library */
4223 rte_metrics_init(rte_socket_id());
4225 #ifdef RTE_LIB_LATENCYSTATS
4226 if (latencystats_enabled != 0) {
4227 int ret = rte_latencystats_init(1, NULL);
4230 "Warning: latencystats init() returned error %d\n",
4232 fprintf(stderr, "Latencystats running on lcore %d\n",
4233 latencystats_lcore_id);
4237 /* Setup bitrate stats */
4238 #ifdef RTE_LIB_BITRATESTATS
4239 if (bitrate_enabled != 0) {
4240 bitrate_data = rte_stats_bitrate_create();
4241 if (bitrate_data == NULL)
4242 rte_exit(EXIT_FAILURE,
4243 "Could not allocate bitrate data.\n");
4244 rte_stats_bitrate_reg(bitrate_data);
4248 #ifdef RTE_LIB_CMDLINE
4249 if (strlen(cmdline_filename) != 0)
4250 cmdline_read_from_file(cmdline_filename);
4252 if (interactive == 1) {
4254 printf("Start automatic packet forwarding\n");
4255 start_packet_forwarding(0);
4267 printf("No commandline core given, start packet forwarding\n");
4268 start_packet_forwarding(tx_first);
/* Non-interactive stats loop: print every stats_period seconds until
 * a signal sets f_quit; sleeps 1s between checks to stay cheap. */
4269 if (stats_period != 0) {
4270 uint64_t prev_time = 0, cur_time, diff_time = 0;
4271 uint64_t timer_period;
4273 /* Convert to number of cycles */
4274 timer_period = stats_period * rte_get_timer_hz();
4276 while (f_quit == 0) {
4277 cur_time = rte_get_timer_cycles();
4278 diff_time += cur_time - prev_time;
4280 if (diff_time >= timer_period) {
4282 /* Reset the timer */
4285 /* Sleep to avoid unnecessary checks */
4286 prev_time = cur_time;
4287 rte_delay_us_sleep(US_PER_S);
4291 printf("Press enter to exit\n");
4292 rc = read(0, &c, 1);
4298 ret = rte_eal_cleanup();
4300 rte_exit(EXIT_FAILURE,
4301 "EAL cleanup failed: %s\n", strerror(-ret));
4303 return EXIT_SUCCESS;