1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
12 #ifndef RTE_EXEC_ENV_WINDOWS
15 #include <sys/types.h>
19 #include <sys/queue.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_byteorder.h>
30 #include <rte_debug.h>
31 #include <rte_cycles.h>
32 #include <rte_memory.h>
33 #include <rte_memcpy.h>
34 #include <rte_launch.h>
36 #include <rte_alarm.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_atomic.h>
40 #include <rte_branch_prediction.h>
41 #include <rte_mempool.h>
42 #include <rte_malloc.h>
44 #include <rte_mbuf_pool_ops.h>
45 #include <rte_interrupts.h>
47 #include <rte_ether.h>
48 #include <rte_ethdev.h>
50 #include <rte_string_fns.h>
52 #include <rte_pmd_ixgbe.h>
55 #include <rte_pdump.h>
58 #include <rte_metrics.h>
59 #ifdef RTE_LIB_BITRATESTATS
60 #include <rte_bitrate.h>
62 #ifdef RTE_LIB_LATENCYSTATS
63 #include <rte_latencystats.h>
65 #ifdef RTE_EXEC_ENV_WINDOWS
72 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
73 #define HUGE_FLAG (0x40000)
75 #define HUGE_FLAG MAP_HUGETLB
78 #ifndef MAP_HUGE_SHIFT
79 /* older kernels (or FreeBSD) will not have this define */
80 #define HUGE_SHIFT (26)
82 #define HUGE_SHIFT MAP_HUGE_SHIFT
85 #define EXTMEM_HEAP_NAME "extmem"
86 #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
88 uint16_t verbose_level = 0; /**< Silent by default. */
89 int testpmd_logtype; /**< Log type for testpmd logs */
91 /* Use the main core for the command line? */
92 uint8_t interactive = 0;
93 uint8_t auto_start = 0;
95 char cmdline_filename[PATH_MAX] = {0};
98 * NUMA support configuration.
99 * When set, the NUMA support attempts to dispatch the allocation of the
100 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
101 * probed ports among the CPU sockets 0 and 1.
102 * Otherwise, all memory is allocated from CPU socket 0.
104 uint8_t numa_support = 1; /**< numa enabled by default */
107 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
110 uint8_t socket_num = UMA_NO_CONFIG;
113 * Select mempool allocation type:
114 * - native: use regular DPDK memory
115 * - anon: use regular DPDK memory to create mempool, but populate using
116 * anonymous memory (may not be IOVA-contiguous)
117 * - xmem: use externally allocated hugepage memory
119 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
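/*
 * Illustrative sketch, not part of this file: the allocation type above is
 * normally selected from the testpmd command line. The option spelling below
 * is an assumption and may differ between DPDK versions:
 *
 *   ./dpdk-testpmd -l 0-3 -n 4 -- -i --mp-alloc=xmemhuge
 *
 * which would set mp_alloc_type = MP_ALLOC_XMEM_HUGE before the mbuf pools
 * are created in mbuf_pool_create() further below.
 */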
122 * Per-port socket on which the memory pool used by the port is allocated.
125 uint8_t port_numa[RTE_MAX_ETHPORTS];
128 * Per-port socket on which the RX ring used by the port is allocated.
131 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
134 * Per-port socket on which the TX ring used by the port is allocated.
137 uint8_t txring_numa[RTE_MAX_ETHPORTS];
140 * Record the Ethernet addresses of the peer target ports to which packets are sent.
142 * Must be instantiated with the Ethernet addresses of the peer traffic generator ports.
145 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
146 portid_t nb_peer_eth_addrs = 0;
149 * Probed Target Environment.
151 struct rte_port *ports; /**< For all probed ethernet ports. */
152 portid_t nb_ports; /**< Number of probed ethernet ports. */
153 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
154 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
156 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
159 * Test Forwarding Configuration.
160 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
161 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
163 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
164 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
165 portid_t nb_cfg_ports; /**< Number of configured ports. */
166 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
168 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
169 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
171 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
172 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
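/*
 * Illustrative example of the invariants documented above (numbers and the
 * interactive command name are assumptions): with 4 enabled lcores and
 * 2 probed ports, nb_lcores = 4 and nb_ports = 2; an interactive
 * "set nb-cores 2" would then give nb_fwd_lcores = 2, keeping
 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores.
 */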
175 * Forwarding engines.
177 struct fwd_engine * fwd_engines[] = {
187 &five_tuple_swap_fwd_engine,
188 #ifdef RTE_LIBRTE_IEEE1588
189 &ieee1588_fwd_engine,
194 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
195 uint16_t mempool_flags;
197 struct fwd_config cur_fwd_config;
198 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
199 uint32_t retry_enabled;
200 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
201 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
203 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
204 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
205 DEFAULT_MBUF_DATA_SIZE
206 }; /**< Mbuf data space size. */
207 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
208 * specified on command-line. */
209 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
211 /** Extended statistics to show. */
212 struct rte_eth_xstat_name *xstats_display;
214 unsigned int xstats_display_num; /**< Size of extended statistics to show */
217 * In a container, the process running with the 'stats-period' option cannot be
218 * terminated. Set a flag to exit the stats period loop after SIGINT/SIGTERM is received.
223 * Max Rx frame size, set by '--max-pkt-len' parameter.
225 uint32_t max_rx_pkt_len;
228 * Configuration of packet segments used to scatter received packets
229 * if any of the split features is configured.
231 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
232 uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
233 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
234 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
237 * Configuration of packet segments used by the "txonly" processing engine.
239 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
240 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
241 TXONLY_DEF_PACKET_LEN,
243 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
245 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
246 /**< Split policy for packets to TX. */
248 uint8_t txonly_multi_flow;
249 /**< Whether multiple flows are generated in TXONLY mode. */
251 uint32_t tx_pkt_times_inter;
252 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
254 uint32_t tx_pkt_times_intra;
255 /**< Timings for send scheduling in TXONLY mode, time between packets. */
257 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
258 uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
259 int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
260 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
262 /* Whether the current configuration is in DCB mode; 0 means it is not. */
263 uint8_t dcb_config = 0;
266 * Configurable number of RX/TX queues.
268 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
269 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
270 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
273 * Configurable number of RX/TX ring descriptors.
274 * Defaults are supplied by drivers via ethdev.
276 #define RTE_TEST_RX_DESC_DEFAULT 0
277 #define RTE_TEST_TX_DESC_DEFAULT 0
278 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
279 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
281 #define RTE_PMD_PARAM_UNSET -1
283 * Configurable values of RX and TX ring threshold registers.
286 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
287 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
288 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
290 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
291 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
292 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
295 * Configurable value of RX free threshold.
297 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
300 * Configurable value of RX drop enable.
302 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
305 * Configurable value of TX free threshold.
307 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
310 * Configurable value of TX RS bit threshold.
312 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
315 * Configurable value of buffered packets before sending.
317 uint16_t noisy_tx_sw_bufsz;
320 * Configurable value of packet buffer timeout.
322 uint16_t noisy_tx_sw_buf_flush_time;
325 * Configurable value for size of VNF internal memory area
326 * used for simulating noisy neighbour behaviour
328 uint64_t noisy_lkup_mem_sz;
331 * Configurable value of number of random writes done in
332 * VNF simulation memory area.
334 uint64_t noisy_lkup_num_writes;
337 * Configurable value of number of random reads done in
338 * VNF simulation memory area.
340 uint64_t noisy_lkup_num_reads;
343 * Configurable value of number of random reads/writes done in
344 * VNF simulation memory area.
346 uint64_t noisy_lkup_num_reads_writes;
349 * Receive Side Scaling (RSS) configuration.
351 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
354 * Port topology configuration
356 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
359 * Avoid flushing all the RX streams before starting forwarding.
361 uint8_t no_flush_rx = 0; /* flush by default */
364 * Flow API isolated mode.
366 uint8_t flow_isolate_all;
369 * Avoid checking the link status when starting/stopping a port.
371 uint8_t no_link_check = 0; /* check by default */
374 * Don't automatically start all ports in interactive mode.
376 uint8_t no_device_start = 0;
379 * Enable link status change notification
381 uint8_t lsc_interrupt = 1; /* enabled by default */
384 * Enable device removal notification.
386 uint8_t rmv_interrupt = 1; /* enabled by default */
388 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
390 /* After attach, port setup is called on event or by iterator */
391 bool setup_on_probe_event = true;
393 /* Clear ptypes on port initialization. */
394 uint8_t clear_ptypes = true;
396 /* Hairpin ports configuration mode. */
397 uint16_t hairpin_mode;
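/*
 * Illustrative reading of the hairpin_mode bits, derived from how
 * setup_hairpin_queues() below consumes them (anything beyond that is an
 * assumption):
 *
 *   uint32_t tx_explicit = hairpin_mode & 0x10; // explicit Tx flow rules
 *   uint32_t topology    = hairpin_mode & 0xf;  // 0: port paired with itself
 *                                               // 0x1: Tx peer is the next port
 *                                               // 0x2: Rx peer is the previous port
 */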
399 /* Pretty printing of ethdev events */
400 static const char * const eth_event_desc[] = {
401 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
402 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
403 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
404 [RTE_ETH_EVENT_INTR_RESET] = "reset",
405 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
406 [RTE_ETH_EVENT_IPSEC] = "IPsec",
407 [RTE_ETH_EVENT_MACSEC] = "MACsec",
408 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
409 [RTE_ETH_EVENT_NEW] = "device probed",
410 [RTE_ETH_EVENT_DESTROY] = "device released",
411 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
412 [RTE_ETH_EVENT_MAX] = NULL,
416 * Display or mask ether events
417 * Default to all events except VF_MBOX, NEW and DESTROY
419 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
420 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
421 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
422 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
423 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
424 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
425 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
426 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
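/*
 * Illustrative sketch of how the mask is consumed (the helper below is
 * hypothetical and not part of this file):
 *
 * static inline bool
 * eth_event_is_printed(enum rte_eth_event_type type)
 * {
 *	return (event_print_mask & (UINT32_C(1) << type)) != 0;
 * }
 *
 * VF mailbox events, for instance, have no bit set in the default mask
 * above, so they are not printed unless the mask is changed.
 */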
428 * Decide if all memory is locked for performance.
433 * NIC bypass mode configuration options.
436 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
437 /* The NIC bypass watchdog timeout. */
438 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
442 #ifdef RTE_LIB_LATENCYSTATS
445 * Set when latency stats are enabled on the command line.
447 uint8_t latencystats_enabled;
450 * Lcore ID to serve latency statistics.
452 lcoreid_t latencystats_lcore_id = -1;
457 * Ethernet device configuration.
459 struct rte_eth_rxmode rx_mode;
461 struct rte_eth_txmode tx_mode = {
462 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
465 struct rte_fdir_conf fdir_conf = {
466 .mode = RTE_FDIR_MODE_NONE,
467 .pballoc = RTE_FDIR_PBALLOC_64K,
468 .status = RTE_FDIR_REPORT_STATUS,
470 .vlan_tci_mask = 0xFFEF,
472 .src_ip = 0xFFFFFFFF,
473 .dst_ip = 0xFFFFFFFF,
476 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
477 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
479 .src_port_mask = 0xFFFF,
480 .dst_port_mask = 0xFFFF,
481 .mac_addr_byte_mask = 0xFF,
482 .tunnel_type_mask = 1,
483 .tunnel_id_mask = 0xFFFFFFFF,
488 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
491 * Display zero values by default for xstats
493 uint8_t xstats_hide_zero;
496 * Measure of CPU cycles disabled by default
498 uint8_t record_core_cycles;
501 * Display of RX and TX bursts disabled by default
503 uint8_t record_burst_stats;
505 unsigned int num_sockets = 0;
506 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
508 #ifdef RTE_LIB_BITRATESTATS
509 /* Bitrate statistics */
510 struct rte_stats_bitrates *bitrate_data;
511 lcoreid_t bitrate_lcore_id;
512 uint8_t bitrate_enabled;
515 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
516 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
519 * Hexadecimal bitmask of RX multi-queue modes that can be enabled.
521 enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
524 * Used to set forced link speed
526 uint32_t eth_link_speed;
529 * ID of the current process in multi-process, used to
530 * configure the queues to be polled.
535 * Number of processes in multi-process, used to
536 * configure the queues to be polled.
538 unsigned int num_procs = 1;
541 eth_rx_metadata_negotiate_mp(uint16_t port_id)
543 uint64_t rx_meta_features = 0;
546 if (!is_proc_primary())
549 rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
550 rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
551 rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
553 ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
555 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
556 TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
560 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
561 TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
565 if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
566 TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
569 } else if (ret != -ENOTSUP) {
570 rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
571 port_id, rte_strerror(-ret));
576 flow_pick_transfer_proxy_mp(uint16_t port_id)
578 struct rte_port *port = &ports[port_id];
581 port->flow_transfer_proxy = port_id;
583 if (!is_proc_primary())
586 ret = rte_flow_pick_transfer_proxy(port_id, &port->flow_transfer_proxy,
589 fprintf(stderr, "Error picking flow transfer proxy for port %u: %s - ignore\n",
590 port_id, rte_strerror(-ret));
595 eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
596 const struct rte_eth_conf *dev_conf)
598 if (is_proc_primary())
599 return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
605 eth_dev_start_mp(uint16_t port_id)
607 if (is_proc_primary())
608 return rte_eth_dev_start(port_id);
614 eth_dev_stop_mp(uint16_t port_id)
616 if (is_proc_primary())
617 return rte_eth_dev_stop(port_id);
623 mempool_free_mp(struct rte_mempool *mp)
625 if (is_proc_primary())
626 rte_mempool_free(mp);
630 eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
632 if (is_proc_primary())
633 return rte_eth_dev_set_mtu(port_id, mtu);
638 /* Forward function declarations */
639 static void setup_attached_port(portid_t pi);
640 static void check_all_ports_link_status(uint32_t port_mask);
641 static int eth_event_callback(portid_t port_id,
642 enum rte_eth_event_type type,
643 void *param, void *ret_param);
644 static void dev_event_callback(const char *device_name,
645 enum rte_dev_event_type type,
647 static void fill_xstats_display_info(void);
650 * Check if all the ports are started.
651 * If yes, return positive value. If not, return zero.
653 static int all_ports_started(void);
655 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
656 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
658 /* Holds the registered mbuf dynamic flag names. */
659 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
663 * Helper function to check whether a socket ID has not been discovered yet.
664 * Return a positive value if it is new, zero if it was already discovered.
667 new_socket_id(unsigned int socket_id)
671 for (i = 0; i < num_sockets; i++) {
672 if (socket_ids[i] == socket_id)
679 * Setup default configuration.
682 set_default_fwd_lcores_config(void)
686 unsigned int sock_num;
689 for (i = 0; i < RTE_MAX_LCORE; i++) {
690 if (!rte_lcore_is_enabled(i))
692 sock_num = rte_lcore_to_socket_id(i);
693 if (new_socket_id(sock_num)) {
694 if (num_sockets >= RTE_MAX_NUMA_NODES) {
695 rte_exit(EXIT_FAILURE,
696 "Total sockets greater than %u\n",
699 socket_ids[num_sockets++] = sock_num;
701 if (i == rte_get_main_lcore())
703 fwd_lcores_cpuids[nb_lc++] = i;
705 nb_lcores = (lcoreid_t) nb_lc;
706 nb_cfg_lcores = nb_lcores;
711 set_def_peer_eth_addrs(void)
715 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
716 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
717 peer_eth_addrs[i].addr_bytes[5] = i;
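/*
 * Illustrative result: RTE_ETHER_LOCAL_ADMIN_ADDR is 0x02 and the remaining
 * bytes stay zero, so the default peers become 02:00:00:00:00:00 for port 0,
 * 02:00:00:00:00:01 for port 1, and so on.
 */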
722 set_default_fwd_ports_config(void)
727 RTE_ETH_FOREACH_DEV(pt_id) {
728 fwd_ports_ids[i++] = pt_id;
730 /* Update sockets info according to the attached device */
731 int socket_id = rte_eth_dev_socket_id(pt_id);
732 if (socket_id >= 0 && new_socket_id(socket_id)) {
733 if (num_sockets >= RTE_MAX_NUMA_NODES) {
734 rte_exit(EXIT_FAILURE,
735 "Total sockets greater than %u\n",
738 socket_ids[num_sockets++] = socket_id;
742 nb_cfg_ports = nb_ports;
743 nb_fwd_ports = nb_ports;
747 set_def_fwd_config(void)
749 set_default_fwd_lcores_config();
750 set_def_peer_eth_addrs();
751 set_default_fwd_ports_config();
754 #ifndef RTE_EXEC_ENV_WINDOWS
755 /* extremely pessimistic estimation of memory required to create a mempool */
757 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
759 unsigned int n_pages, mbuf_per_pg, leftover;
760 uint64_t total_mem, mbuf_mem, obj_sz;
762 /* there is no good way to predict how much space the mempool will
763 * occupy because it will allocate chunks on the fly, and some of those
764 * will come from default DPDK memory while some will come from our
765 * external memory, so just assume 128MB will be enough for everyone.
767 uint64_t hdr_mem = 128 << 20;
769 /* account for possible non-contiguousness */
770 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
772 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
776 mbuf_per_pg = pgsz / obj_sz;
777 leftover = (nb_mbufs % mbuf_per_pg) > 0;
778 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
780 mbuf_mem = n_pages * pgsz;
782 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
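/*
 * Worked example (illustrative numbers only): with nb_mbufs = 100000,
 * obj_sz = 2560 and pgsz = 2 MB, mbuf_per_pg = 819, n_pages = 123,
 * mbuf_mem = 123 * 2 MB = 246 MB, so total_mem = RTE_ALIGN(128 MB + 246 MB,
 * 2 MB) = 374 MB.
 */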
784 if (total_mem > SIZE_MAX) {
785 TESTPMD_LOG(ERR, "Memory size too big\n");
788 *out = (size_t)total_mem;
794 pagesz_flags(uint64_t page_sz)
796 /* as per the mmap() manpage, the huge page size flag is the log2 of the
797 * page size shifted by MAP_HUGE_SHIFT
799 int log2 = rte_log2_u64(page_sz);
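/*
 * Worked example: for a 1 GB page rte_log2_u64() returns 30, so the value
 * produced below is (30 << 26), matching the kernel's MAP_HUGE_1GB when
 * MAP_HUGE_SHIFT is 26 (the fallback defined above).
 */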
801 return (log2 << HUGE_SHIFT);
805 alloc_mem(size_t memsz, size_t pgsz, bool huge)
810 /* allocate anonymous hugepages */
811 flags = MAP_ANONYMOUS | MAP_PRIVATE;
813 flags |= HUGE_FLAG | pagesz_flags(pgsz);
815 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
816 if (addr == MAP_FAILED)
822 struct extmem_param {
826 rte_iova_t *iova_table;
827 unsigned int iova_table_len;
831 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
834 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
835 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
836 unsigned int cur_page, n_pages, pgsz_idx;
837 size_t mem_sz, cur_pgsz;
838 rte_iova_t *iovas = NULL;
842 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
843 /* skip anything that is too big */
844 if (pgsizes[pgsz_idx] > SIZE_MAX)
847 cur_pgsz = pgsizes[pgsz_idx];
849 /* if we were told not to allocate hugepages, override */
851 cur_pgsz = sysconf(_SC_PAGESIZE);
853 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
855 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
859 /* allocate our memory */
860 addr = alloc_mem(mem_sz, cur_pgsz, huge);
862 /* if we couldn't allocate memory with a specified page size,
863 * that doesn't mean we can't do it with other page sizes, so
869 /* store IOVA addresses for every page in this memory area */
870 n_pages = mem_sz / cur_pgsz;
872 iovas = malloc(sizeof(*iovas) * n_pages);
875 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
878 /* lock memory if it's not huge pages */
882 /* populate IOVA addresses */
883 for (cur_page = 0; cur_page < n_pages; cur_page++) {
888 offset = cur_pgsz * cur_page;
889 cur = RTE_PTR_ADD(addr, offset);
891 /* touch the page before getting its IOVA */
892 *(volatile char *)cur = 0;
894 iova = rte_mem_virt2iova(cur);
896 iovas[cur_page] = iova;
901 /* if we couldn't allocate anything */
907 param->pgsz = cur_pgsz;
908 param->iova_table = iovas;
909 param->iova_table_len = n_pages;
916 munmap(addr, mem_sz);
922 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
924 struct extmem_param param;
927 memset(&param, 0, sizeof(param));
929 /* check if our heap exists */
930 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
932 /* create our heap */
933 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
935 TESTPMD_LOG(ERR, "Cannot create heap\n");
940 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
942 TESTPMD_LOG(ERR, "Cannot create memory area\n");
946 /* we now have a valid memory area, so add it to heap */
947 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
948 param.addr, param.len, param.iova_table,
949 param.iova_table_len, param.pgsz);
951 /* when using VFIO, memory is automatically mapped for DMA by EAL */
953 /* not needed any more */
954 free(param.iova_table);
957 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
958 munmap(param.addr, param.len);
964 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
970 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
971 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
976 RTE_ETH_FOREACH_DEV(pid) {
977 struct rte_eth_dev_info dev_info;
979 ret = eth_dev_info_get_print_err(pid, &dev_info);
982 "unable to get device info for port %d on addr 0x%p,"
983 "mempool unmapping will not be performed\n",
988 ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
991 "unable to DMA unmap addr 0x%p "
993 memhdr->addr, dev_info.device->name);
996 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
999 "unable to un-register addr 0x%p\n", memhdr->addr);
1004 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
1005 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
1008 size_t page_size = sysconf(_SC_PAGESIZE);
1011 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
1015 "unable to register addr 0x%p\n", memhdr->addr);
1018 RTE_ETH_FOREACH_DEV(pid) {
1019 struct rte_eth_dev_info dev_info;
1021 ret = eth_dev_info_get_print_err(pid, &dev_info);
1024 "unable to get device info for port %d on addr 0x%p,"
1025 "mempool mapping will not be performed\n",
1029 ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
1032 "unable to DMA map addr 0x%p "
1034 memhdr->addr, dev_info.device->name);
1041 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
1042 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
1044 struct rte_pktmbuf_extmem *xmem;
1045 unsigned int ext_num, zone_num, elt_num;
1048 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
1049 elt_num = EXTBUF_ZONE_SIZE / elt_size;
1050 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
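/*
 * Worked example (illustrative numbers): mbuf_sz = 2176 is already a
 * multiple of a 64-byte cache line, so elt_size = 2176, a 2 MB zone holds
 * elt_num = 2097152 / 2176 = 963 buffers, and nb_mbufs = 262144 requires
 * zone_num = 273 memzones.
 */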
1052 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
1054 TESTPMD_LOG(ERR, "Cannot allocate memory for "
1055 "external buffer descriptors\n");
1059 for (ext_num = 0; ext_num < zone_num; ext_num++) {
1060 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
1061 const struct rte_memzone *mz;
1062 char mz_name[RTE_MEMZONE_NAMESIZE];
1065 ret = snprintf(mz_name, sizeof(mz_name),
1066 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
1067 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
1068 errno = ENAMETOOLONG;
1072 mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
1074 RTE_MEMZONE_IOVA_CONTIG |
1076 RTE_MEMZONE_SIZE_HINT_ONLY,
1080 * The caller exits on external buffer creation
1081 * error, so there is no need to free memzones.
1087 xseg->buf_ptr = mz->addr;
1088 xseg->buf_iova = mz->iova;
1089 xseg->buf_len = EXTBUF_ZONE_SIZE;
1090 xseg->elt_size = elt_size;
1092 if (ext_num == 0 && xmem != NULL) {
1101 * Configuration initialisation done once at init time.
1103 static struct rte_mempool *
1104 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
1105 unsigned int socket_id, uint16_t size_idx)
1107 char pool_name[RTE_MEMPOOL_NAMESIZE];
1108 struct rte_mempool *rte_mp = NULL;
1109 #ifndef RTE_EXEC_ENV_WINDOWS
1112 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1114 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1115 if (!is_proc_primary()) {
1116 rte_mp = rte_mempool_lookup(pool_name);
1118 rte_exit(EXIT_FAILURE,
1119 "Get mbuf pool for socket %u failed: %s\n",
1120 socket_id, rte_strerror(rte_errno));
1125 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1126 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1128 switch (mp_alloc_type) {
1129 case MP_ALLOC_NATIVE:
1131 /* wrapper to rte_mempool_create() */
1132 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1133 rte_mbuf_best_mempool_ops());
1134 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1135 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1138 #ifndef RTE_EXEC_ENV_WINDOWS
1141 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1142 mb_size, (unsigned int) mb_mempool_cache,
1143 sizeof(struct rte_pktmbuf_pool_private),
1144 socket_id, mempool_flags);
1148 if (rte_mempool_populate_anon(rte_mp) == 0) {
1149 rte_mempool_free(rte_mp);
1153 rte_pktmbuf_pool_init(rte_mp, NULL);
1154 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1155 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1159 case MP_ALLOC_XMEM_HUGE:
1162 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1164 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1165 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1168 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1169 if (heap_socket < 0)
1170 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1172 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1173 rte_mbuf_best_mempool_ops());
1174 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1175 mb_mempool_cache, 0, mbuf_seg_size,
1182 struct rte_pktmbuf_extmem *ext_mem;
1183 unsigned int ext_num;
1185 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1186 socket_id, pool_name, &ext_mem);
1188 rte_exit(EXIT_FAILURE,
1189 "Can't create pinned data buffers\n");
1191 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1192 rte_mbuf_best_mempool_ops());
1193 rte_mp = rte_pktmbuf_pool_create_extbuf
1194 (pool_name, nb_mbuf, mb_mempool_cache,
1195 0, mbuf_seg_size, socket_id,
1202 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1206 #ifndef RTE_EXEC_ENV_WINDOWS
1209 if (rte_mp == NULL) {
1210 rte_exit(EXIT_FAILURE,
1211 "Creation of mbuf pool for socket %u failed: %s\n",
1212 socket_id, rte_strerror(rte_errno));
1213 } else if (verbose_level > 0) {
1214 rte_mempool_dump(stdout, rte_mp);
1220 * Check whether the given socket ID is valid in NUMA mode.
1221 * Return 0 if valid, -1 otherwise.
1224 check_socket_id(const unsigned int socket_id)
1226 static int warning_once = 0;
1228 if (new_socket_id(socket_id)) {
1229 if (!warning_once && numa_support)
1231 "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
1239 * Get the allowed maximum number of RX queues.
1240 * *pid returns the port ID which has the minimal value of
1241 * max_rx_queues among all ports.
1244 get_allowed_max_nb_rxq(portid_t *pid)
1246 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1247 bool max_rxq_valid = false;
1249 struct rte_eth_dev_info dev_info;
1251 RTE_ETH_FOREACH_DEV(pi) {
1252 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1255 max_rxq_valid = true;
1256 if (dev_info.max_rx_queues < allowed_max_rxq) {
1257 allowed_max_rxq = dev_info.max_rx_queues;
1261 return max_rxq_valid ? allowed_max_rxq : 0;
1265 * Check whether the input rxq is valid.
1266 * It is valid if it is not greater than the maximum number
1267 * of RX queues of any port.
1268 * If valid, return 0, else return -1.
1271 check_nb_rxq(queueid_t rxq)
1273 queueid_t allowed_max_rxq;
1276 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1277 if (rxq > allowed_max_rxq) {
1279 "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
1280 rxq, allowed_max_rxq, pid);
1287 * Get the allowed maximum number of TX queues.
1288 * *pid returns the port ID which has the minimal value of
1289 * max_tx_queues among all ports.
1292 get_allowed_max_nb_txq(portid_t *pid)
1294 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1295 bool max_txq_valid = false;
1297 struct rte_eth_dev_info dev_info;
1299 RTE_ETH_FOREACH_DEV(pi) {
1300 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1303 max_txq_valid = true;
1304 if (dev_info.max_tx_queues < allowed_max_txq) {
1305 allowed_max_txq = dev_info.max_tx_queues;
1309 return max_txq_valid ? allowed_max_txq : 0;
1313 * Check whether the input txq is valid.
1314 * It is valid if it is not greater than the maximum number
1315 * of TX queues of any port.
1316 * If valid, return 0, else return -1.
1319 check_nb_txq(queueid_t txq)
1321 queueid_t allowed_max_txq;
1324 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1325 if (txq > allowed_max_txq) {
1327 "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
1328 txq, allowed_max_txq, pid);
1335 * Get the allowed maximum number of RXDs of every rx queue.
1336 * *pid returns the port ID which has the minimal value of
1337 * max_rxd across all queues of all ports.
1340 get_allowed_max_nb_rxd(portid_t *pid)
1342 uint16_t allowed_max_rxd = UINT16_MAX;
1344 struct rte_eth_dev_info dev_info;
1346 RTE_ETH_FOREACH_DEV(pi) {
1347 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1350 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1351 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1355 return allowed_max_rxd;
1359 * Get the allowed minimal number of RXDs of every rx queue.
1360 * *pid returns the port ID which has the minimal value of
1361 * min_rxd across all queues of all ports.
1364 get_allowed_min_nb_rxd(portid_t *pid)
1366 uint16_t allowed_min_rxd = 0;
1368 struct rte_eth_dev_info dev_info;
1370 RTE_ETH_FOREACH_DEV(pi) {
1371 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1374 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1375 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1380 return allowed_min_rxd;
1384 * Check whether the input rxd is valid.
1385 * It is valid if it is not greater than the maximum number
1386 * of RXDs of every Rx queue and not less than the
1387 * minimal number of RXDs of every Rx queue.
1388 * If valid, return 0, else return -1.
1391 check_nb_rxd(queueid_t rxd)
1393 uint16_t allowed_max_rxd;
1394 uint16_t allowed_min_rxd;
1397 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1398 if (rxd > allowed_max_rxd) {
1400 "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
1401 rxd, allowed_max_rxd, pid);
1405 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1406 if (rxd < allowed_min_rxd) {
1408 "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
1409 rxd, allowed_min_rxd, pid);
1417 * Get the allowed maximum number of TXDs of every Tx queue.
1418 * *pid returns the port ID which has the minimal value of
1419 * max_txd across all Tx queues.
1422 get_allowed_max_nb_txd(portid_t *pid)
1424 uint16_t allowed_max_txd = UINT16_MAX;
1426 struct rte_eth_dev_info dev_info;
1428 RTE_ETH_FOREACH_DEV(pi) {
1429 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1432 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1433 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1437 return allowed_max_txd;
1441 * Get the allowed minimal number of TXDs of every Tx queue.
1442 * *pid returns the port ID which has the minimal value of
1443 * min_txd across all Tx queues.
1446 get_allowed_min_nb_txd(portid_t *pid)
1448 uint16_t allowed_min_txd = 0;
1450 struct rte_eth_dev_info dev_info;
1452 RTE_ETH_FOREACH_DEV(pi) {
1453 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1456 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1457 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1462 return allowed_min_txd;
1466 * Check whether the input txd is valid.
1467 * It is valid if it is not greater than the maximum number
1468 * of TXDs of every Tx queue and not less than the minimal
1469 * number of TXDs of every Tx queue. If valid, return 0, else return -1.
1472 check_nb_txd(queueid_t txd)
1474 uint16_t allowed_max_txd;
1475 uint16_t allowed_min_txd;
1478 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1479 if (txd > allowed_max_txd) {
1481 "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
1482 txd, allowed_max_txd, pid);
1486 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1487 if (txd < allowed_min_txd) {
1489 "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
1490 txd, allowed_min_txd, pid);
1498 * Get the allowed maximum number of hairpin queues.
1499 * *pid returns the port ID which has the minimal value of
1500 * max_hairpin_queues among all ports.
1503 get_allowed_max_nb_hairpinq(portid_t *pid)
1505 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1507 struct rte_eth_hairpin_cap cap;
1509 RTE_ETH_FOREACH_DEV(pi) {
1510 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1514 if (cap.max_nb_queues < allowed_max_hairpinq) {
1515 allowed_max_hairpinq = cap.max_nb_queues;
1519 return allowed_max_hairpinq;
1523 * Check whether the input hairpin queue count is valid.
1524 * It is valid if it is not greater than the maximum number
1525 * of hairpin queues of any port.
1526 * If valid, return 0, else return -1.
1529 check_nb_hairpinq(queueid_t hairpinq)
1531 queueid_t allowed_max_hairpinq;
1534 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1535 if (hairpinq > allowed_max_hairpinq) {
1537 "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
1538 hairpinq, allowed_max_hairpinq, pid);
1545 get_eth_overhead(struct rte_eth_dev_info *dev_info)
1547 uint32_t eth_overhead;
1549 if (dev_info->max_mtu != UINT16_MAX &&
1550 dev_info->max_rx_pktlen > dev_info->max_mtu)
1551 eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
1553 eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
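/*
 * Worked example: a port reporting max_rx_pktlen = 9618 and max_mtu = 9600
 * yields an overhead of 18 bytes; ports without a usable max_mtu fall back
 * to the same 18 bytes, i.e. Ethernet header (14) plus CRC (4).
 */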
1555 return eth_overhead;
1559 init_config_port_offloads(portid_t pid, uint32_t socket_id)
1561 struct rte_port *port = &ports[pid];
1565 eth_rx_metadata_negotiate_mp(pid);
1566 flow_pick_transfer_proxy_mp(pid);
1568 port->dev_conf.txmode = tx_mode;
1569 port->dev_conf.rxmode = rx_mode;
1571 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1573 rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1575 if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1576 port->dev_conf.txmode.offloads &=
1577 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1579 /* Apply Rx offloads configuration */
1580 for (i = 0; i < port->dev_info.max_rx_queues; i++)
1581 port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
1582 /* Apply Tx offloads configuration */
1583 for (i = 0; i < port->dev_info.max_tx_queues; i++)
1584 port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
1587 port->dev_conf.link_speeds = eth_link_speed;
1590 port->dev_conf.rxmode.mtu = max_rx_pkt_len -
1591 get_eth_overhead(&port->dev_info);
1593 /* set flag to initialize port/queue */
1594 port->need_reconfig = 1;
1595 port->need_reconfig_queues = 1;
1596 port->socket_id = socket_id;
1597 port->tx_metadata = 0;
1600 * Check for maximum number of segments per MTU.
1601 * Accordingly update the mbuf data size.
1603 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1604 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1605 uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
1608 if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
1609 uint16_t data_size = (mtu + eth_overhead) /
1610 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1611 uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
1613 if (buffer_size > mbuf_data_size[0]) {
1614 mbuf_data_size[0] = buffer_size;
1615 TESTPMD_LOG(WARNING,
1616 "Configured mbuf size of the first segment %hu\n",
1627 struct rte_mempool *mbp;
1628 unsigned int nb_mbuf_per_pool;
1630 struct rte_gro_param gro_param;
1633 /* Configuration of logical cores. */
1634 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1635 sizeof(struct fwd_lcore *) * nb_lcores,
1636 RTE_CACHE_LINE_SIZE);
1637 if (fwd_lcores == NULL) {
1638 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1639 "failed\n", nb_lcores);
1641 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1642 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1643 sizeof(struct fwd_lcore),
1644 RTE_CACHE_LINE_SIZE);
1645 if (fwd_lcores[lc_id] == NULL) {
1646 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1649 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1652 RTE_ETH_FOREACH_DEV(pid) {
1656 socket_id = port_numa[pid];
1657 if (port_numa[pid] == NUMA_NO_CONFIG) {
1658 socket_id = rte_eth_dev_socket_id(pid);
1661 * if socket_id is invalid,
1662 * set to the first available socket.
1664 if (check_socket_id(socket_id) < 0)
1665 socket_id = socket_ids[0];
1668 socket_id = (socket_num == UMA_NO_CONFIG) ?
1671 /* Apply default TxRx configuration for all ports */
1672 init_config_port_offloads(pid, socket_id);
1675 * Create pools of mbuf.
1676 * If NUMA support is disabled, create a single pool of mbuf in
1677 * socket 0 memory by default.
1678 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1680 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1681 * nb_txd can be configured at run time.
1683 if (param_total_num_mbufs)
1684 nb_mbuf_per_pool = param_total_num_mbufs;
1686 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1687 (nb_lcores * mb_mempool_cache) +
1688 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1689 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
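/*
 * Worked sizing example (the constants are assumptions taken from the
 * testpmd.h defaults, e.g. 2048 max Rx/Tx descriptors, 512 burst,
 * 250 cache entries): with 4 lcores this gives
 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, which is then multiplied by
 * RTE_MAX_ETHPORTS for the shared pool.
 */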
1695 for (i = 0; i < num_sockets; i++)
1696 for (j = 0; j < mbuf_data_size_n; j++)
1697 mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1698 mbuf_pool_create(mbuf_data_size[j],
1704 for (i = 0; i < mbuf_data_size_n; i++)
1705 mempools[i] = mbuf_pool_create
1708 socket_num == UMA_NO_CONFIG ?
1714 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1715 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1717 * Record which mbuf pool each logical core should use, if needed.
1719 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1720 mbp = mbuf_pool_find(
1721 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1724 mbp = mbuf_pool_find(0, 0);
1725 fwd_lcores[lc_id]->mbp = mbp;
1726 /* initialize GSO context */
1727 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1728 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1729 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1730 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1732 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1737 /* create a gro context for each lcore */
1738 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1739 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1740 gro_param.max_item_per_flow = MAX_PKT_BURST;
1741 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1742 gro_param.socket_id = rte_lcore_to_socket_id(
1743 fwd_lcores_cpuids[lc_id]);
1744 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1745 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1746 rte_exit(EXIT_FAILURE,
1747 "rte_gro_ctx_create() failed\n");
1754 reconfig(portid_t new_port_id, unsigned socket_id)
1756 /* Reconfiguration of Ethernet ports. */
1757 init_config_port_offloads(new_port_id, socket_id);
1763 init_fwd_streams(void)
1766 struct rte_port *port;
1767 streamid_t sm_id, nb_fwd_streams_new;
1770 /* Set the socket ID according to whether NUMA support is enabled */
1771 RTE_ETH_FOREACH_DEV(pid) {
1773 if (nb_rxq > port->dev_info.max_rx_queues) {
1775 "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
1776 nb_rxq, port->dev_info.max_rx_queues);
1779 if (nb_txq > port->dev_info.max_tx_queues) {
1781 "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
1782 nb_txq, port->dev_info.max_tx_queues);
1786 if (port_numa[pid] != NUMA_NO_CONFIG)
1787 port->socket_id = port_numa[pid];
1789 port->socket_id = rte_eth_dev_socket_id(pid);
1792 * if socket_id is invalid,
1793 * set to the first available socket.
1795 if (check_socket_id(port->socket_id) < 0)
1796 port->socket_id = socket_ids[0];
1800 if (socket_num == UMA_NO_CONFIG)
1801 port->socket_id = 0;
1803 port->socket_id = socket_num;
1807 q = RTE_MAX(nb_rxq, nb_txq);
1810 "Fail: Cannot allocate fwd streams as number of queues is 0\n");
1813 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1814 if (nb_fwd_streams_new == nb_fwd_streams)
1817 if (fwd_streams != NULL) {
1818 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1819 if (fwd_streams[sm_id] == NULL)
1821 rte_free(fwd_streams[sm_id]);
1822 fwd_streams[sm_id] = NULL;
1824 rte_free(fwd_streams);
1829 nb_fwd_streams = nb_fwd_streams_new;
1830 if (nb_fwd_streams) {
1831 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1832 sizeof(struct fwd_stream *) * nb_fwd_streams,
1833 RTE_CACHE_LINE_SIZE);
1834 if (fwd_streams == NULL)
1835 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1836 " (struct fwd_stream *)) failed\n",
1839 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1840 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1841 " struct fwd_stream", sizeof(struct fwd_stream),
1842 RTE_CACHE_LINE_SIZE);
1843 if (fwd_streams[sm_id] == NULL)
1844 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1845 "(struct fwd_stream) failed\n");
1853 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1855 uint64_t total_burst, sburst;
1857 uint64_t burst_stats[4];
1858 uint16_t pktnb_stats[4];
1860 int burst_percent[4], sburstp;
1864 * First compute the total number of packet bursts and the
1865 * two highest numbers of bursts of the same number of packets.
1867 memset(&burst_stats, 0x0, sizeof(burst_stats));
1868 memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1870 /* Show stats for 0 burst size always */
1871 total_burst = pbs->pkt_burst_spread[0];
1872 burst_stats[0] = pbs->pkt_burst_spread[0];
1875 /* Find the next 2 burst sizes with highest occurrences. */
1876 for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1877 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1882 total_burst += nb_burst;
1884 if (nb_burst > burst_stats[1]) {
1885 burst_stats[2] = burst_stats[1];
1886 pktnb_stats[2] = pktnb_stats[1];
1887 burst_stats[1] = nb_burst;
1888 pktnb_stats[1] = nb_pkt;
1889 } else if (nb_burst > burst_stats[2]) {
1890 burst_stats[2] = nb_burst;
1891 pktnb_stats[2] = nb_pkt;
1894 if (total_burst == 0)
1897 printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1898 for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1900 printf("%d%% of other]\n", 100 - sburstp);
1904 sburst += burst_stats[i];
1905 if (sburst == total_burst) {
1906 printf("%d%% of %d pkts]\n",
1907 100 - sburstp, (int) pktnb_stats[i]);
1912 (double)burst_stats[i] / total_burst * 100;
1913 printf("%d%% of %d pkts + ",
1914 burst_percent[i], (int) pktnb_stats[i]);
1915 sburstp += burst_percent[i];
1920 fwd_stream_stats_display(streamid_t stream_id)
1922 struct fwd_stream *fs;
1923 static const char *fwd_top_stats_border = "-------";
1925 fs = fwd_streams[stream_id];
1926 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1927 (fs->fwd_dropped == 0))
1929 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1930 "TX Port=%2d/Queue=%2d %s\n",
1931 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1932 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1933 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1934 " TX-dropped: %-14"PRIu64,
1935 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1937 /* if checksum mode */
1938 if (cur_fwd_eng == &csum_fwd_engine) {
1939 printf(" RX- bad IP checksum: %-14"PRIu64
1940 " Rx- bad L4 checksum: %-14"PRIu64
1941 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1942 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1943 fs->rx_bad_outer_l4_csum);
1944 printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
1945 fs->rx_bad_outer_ip_csum);
1950 if (record_burst_stats) {
1951 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1952 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1957 fwd_stats_display(void)
1959 static const char *fwd_stats_border = "----------------------";
1960 static const char *acc_stats_border = "+++++++++++++++";
1962 struct fwd_stream *rx_stream;
1963 struct fwd_stream *tx_stream;
1964 uint64_t tx_dropped;
1965 uint64_t rx_bad_ip_csum;
1966 uint64_t rx_bad_l4_csum;
1967 uint64_t rx_bad_outer_l4_csum;
1968 uint64_t rx_bad_outer_ip_csum;
1969 } ports_stats[RTE_MAX_ETHPORTS];
1970 uint64_t total_rx_dropped = 0;
1971 uint64_t total_tx_dropped = 0;
1972 uint64_t total_rx_nombuf = 0;
1973 struct rte_eth_stats stats;
1974 uint64_t fwd_cycles = 0;
1975 uint64_t total_recv = 0;
1976 uint64_t total_xmit = 0;
1977 struct rte_port *port;
1982 memset(ports_stats, 0, sizeof(ports_stats));
1984 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1985 struct fwd_stream *fs = fwd_streams[sm_id];
1987 if (cur_fwd_config.nb_fwd_streams >
1988 cur_fwd_config.nb_fwd_ports) {
1989 fwd_stream_stats_display(sm_id);
1991 ports_stats[fs->tx_port].tx_stream = fs;
1992 ports_stats[fs->rx_port].rx_stream = fs;
1995 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1997 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1998 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1999 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
2000 fs->rx_bad_outer_l4_csum;
2001 ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2002 fs->rx_bad_outer_ip_csum;
2004 if (record_core_cycles)
2005 fwd_cycles += fs->core_cycles;
2007 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2008 pt_id = fwd_ports_ids[i];
2009 port = &ports[pt_id];
2011 rte_eth_stats_get(pt_id, &stats);
2012 stats.ipackets -= port->stats.ipackets;
2013 stats.opackets -= port->stats.opackets;
2014 stats.ibytes -= port->stats.ibytes;
2015 stats.obytes -= port->stats.obytes;
2016 stats.imissed -= port->stats.imissed;
2017 stats.oerrors -= port->stats.oerrors;
2018 stats.rx_nombuf -= port->stats.rx_nombuf;
2020 total_recv += stats.ipackets;
2021 total_xmit += stats.opackets;
2022 total_rx_dropped += stats.imissed;
2023 total_tx_dropped += ports_stats[pt_id].tx_dropped;
2024 total_tx_dropped += stats.oerrors;
2025 total_rx_nombuf += stats.rx_nombuf;
2027 printf("\n %s Forward statistics for port %-2d %s\n",
2028 fwd_stats_border, pt_id, fwd_stats_border);
2030 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
2031 "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
2032 stats.ipackets + stats.imissed);
2034 if (cur_fwd_eng == &csum_fwd_engine) {
2035 printf(" Bad-ipcsum: %-14"PRIu64
2036 " Bad-l4csum: %-14"PRIu64
2037 "Bad-outer-l4csum: %-14"PRIu64"\n",
2038 ports_stats[pt_id].rx_bad_ip_csum,
2039 ports_stats[pt_id].rx_bad_l4_csum,
2040 ports_stats[pt_id].rx_bad_outer_l4_csum);
2041 printf(" Bad-outer-ipcsum: %-14"PRIu64"\n",
2042 ports_stats[pt_id].rx_bad_outer_ip_csum);
2044 if (stats.ierrors + stats.rx_nombuf > 0) {
2045 printf(" RX-error: %-"PRIu64"\n", stats.ierrors);
2046 printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
2049 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
2050 "TX-total: %-"PRIu64"\n",
2051 stats.opackets, ports_stats[pt_id].tx_dropped,
2052 stats.opackets + ports_stats[pt_id].tx_dropped);
2054 if (record_burst_stats) {
2055 if (ports_stats[pt_id].rx_stream)
2056 pkt_burst_stats_display("RX",
2057 &ports_stats[pt_id].rx_stream->rx_burst_stats);
2058 if (ports_stats[pt_id].tx_stream)
2059 pkt_burst_stats_display("TX",
2060 &ports_stats[pt_id].tx_stream->tx_burst_stats);
2063 printf(" %s--------------------------------%s\n",
2064 fwd_stats_border, fwd_stats_border);
2067 printf("\n %s Accumulated forward statistics for all ports"
2069 acc_stats_border, acc_stats_border);
2070 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
2072 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
2074 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
2075 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
2076 if (total_rx_nombuf > 0)
2077 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
2078 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
2080 acc_stats_border, acc_stats_border);
2081 if (record_core_cycles) {
2082 #define CYC_PER_MHZ 1E6
2083 if (total_recv > 0 || total_xmit > 0) {
2084 uint64_t total_pkts = 0;
2085 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
2086 strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
2087 total_pkts = total_xmit;
2089 total_pkts = total_recv;
2091 printf("\n CPU cycles/packet=%.2F (total cycles="
2092 "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
2094 (double) fwd_cycles / total_pkts,
2095 fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
2096 (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
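/*
 * Worked example (illustrative numbers): if fwd_cycles accumulated 2.0e10
 * cycles while 1.488e8 packets were forwarded, the line above reports
 * 2.0e10 / 1.488e8 = ~134 CPU cycles/packet, and the MHz figure is simply
 * rte_get_tsc_hz() / 1e6.
 */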
2102 fwd_stats_reset(void)
2108 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2109 pt_id = fwd_ports_ids[i];
2110 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2112 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2113 struct fwd_stream *fs = fwd_streams[sm_id];
2117 fs->fwd_dropped = 0;
2118 fs->rx_bad_ip_csum = 0;
2119 fs->rx_bad_l4_csum = 0;
2120 fs->rx_bad_outer_l4_csum = 0;
2121 fs->rx_bad_outer_ip_csum = 0;
2123 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
2124 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2125 fs->core_cycles = 0;
2130 flush_fwd_rx_queues(void)
2132 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2139 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2140 uint64_t timer_period;
2142 if (num_procs > 1) {
2143 printf("multi-process not support for flushing fwd Rx queues, skip the below lines and return.\n");
2147 /* convert to number of cycles */
2148 timer_period = rte_get_timer_hz(); /* 1 second timeout */
2150 for (j = 0; j < 2; j++) {
2151 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2152 for (rxq = 0; rxq < nb_rxq; rxq++) {
2153 port_id = fwd_ports_ids[rxp];
2155 * testpmd can get stuck in the do-while loop below
2156 * if rte_eth_rx_burst() always returns a nonzero
2157 * number of packets, so a timer is added to exit
2158 * the loop after a 1-second timeout.
2160 prev_tsc = rte_rdtsc();
2162 nb_rx = rte_eth_rx_burst(port_id, rxq,
2163 pkts_burst, MAX_PKT_BURST);
2164 for (i = 0; i < nb_rx; i++)
2165 rte_pktmbuf_free(pkts_burst[i]);
2167 cur_tsc = rte_rdtsc();
2168 diff_tsc = cur_tsc - prev_tsc;
2169 timer_tsc += diff_tsc;
2170 } while ((nb_rx > 0) &&
2171 (timer_tsc < timer_period));
2175 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2180 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2182 struct fwd_stream **fsm;
2185 #ifdef RTE_LIB_BITRATESTATS
2186 uint64_t tics_per_1sec;
2187 uint64_t tics_datum;
2188 uint64_t tics_current;
2189 uint16_t i, cnt_ports;
2191 cnt_ports = nb_ports;
2192 tics_datum = rte_rdtsc();
2193 tics_per_1sec = rte_get_timer_hz();
2195 fsm = &fwd_streams[fc->stream_idx];
2196 nb_fs = fc->stream_nb;
2198 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2199 (*pkt_fwd)(fsm[sm_id]);
2200 #ifdef RTE_LIB_BITRATESTATS
2201 if (bitrate_enabled != 0 &&
2202 bitrate_lcore_id == rte_lcore_id()) {
2203 tics_current = rte_rdtsc();
2204 if (tics_current - tics_datum >= tics_per_1sec) {
2205 /* Periodic bitrate calculation */
2206 for (i = 0; i < cnt_ports; i++)
2207 rte_stats_bitrate_calc(bitrate_data,
2209 tics_datum = tics_current;
2213 #ifdef RTE_LIB_LATENCYSTATS
2214 if (latencystats_enabled != 0 &&
2215 latencystats_lcore_id == rte_lcore_id())
2216 rte_latencystats_update();
2219 } while (! fc->stopped);
2223 start_pkt_forward_on_core(void *fwd_arg)
2225 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2226 cur_fwd_config.fwd_eng->packet_fwd);
2231 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2232 * Used to start communication flows in network loopback test configurations.
2235 run_one_txonly_burst_on_core(void *fwd_arg)
2237 struct fwd_lcore *fwd_lc;
2238 struct fwd_lcore tmp_lcore;
2240 fwd_lc = (struct fwd_lcore *) fwd_arg;
2241 tmp_lcore = *fwd_lc;
2242 tmp_lcore.stopped = 1;
2243 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2248 * Launch packet forwarding:
2249 * - Setup per-port forwarding context.
2250 * - launch logical cores with their forwarding configuration.
2253 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2259 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2260 lc_id = fwd_lcores_cpuids[i];
2261 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2262 fwd_lcores[i]->stopped = 0;
2263 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2264 fwd_lcores[i], lc_id);
2267 "launch lcore %u failed - diag=%d\n",
2274 * Launch packet forwarding configuration.
2277 start_packet_forwarding(int with_tx_first)
2279 port_fwd_begin_t port_fwd_begin;
2280 port_fwd_end_t port_fwd_end;
2283 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2284 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
2286 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2287 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
2289 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2290 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2291 (!nb_rxq || !nb_txq))
2292 rte_exit(EXIT_FAILURE,
2293 "Either rxq or txq are 0, cannot use %s fwd mode\n",
2294 cur_fwd_eng->fwd_mode_name);
2296 if (all_ports_started() == 0) {
2297 fprintf(stderr, "Not all ports were started\n");
2300 if (test_done == 0) {
2301 fprintf(stderr, "Packet forwarding already started\n");
2307 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2308 if (port_fwd_begin != NULL) {
2309 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2310 if (port_fwd_begin(fwd_ports_ids[i])) {
2312 "Packet forwarding is not ready\n");
2318 if (with_tx_first) {
2319 port_fwd_begin = tx_only_engine.port_fwd_begin;
2320 if (port_fwd_begin != NULL) {
2321 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2322 if (port_fwd_begin(fwd_ports_ids[i])) {
2324 "Packet forwarding is not ready\n");
2334 flush_fwd_rx_queues();
2336 pkt_fwd_config_display(&cur_fwd_config);
2337 rxtx_config_display();
2340 if (with_tx_first) {
2341 while (with_tx_first--) {
2342 launch_packet_forwarding(
2343 run_one_txonly_burst_on_core);
2344 rte_eal_mp_wait_lcore();
2346 port_fwd_end = tx_only_engine.port_fwd_end;
2347 if (port_fwd_end != NULL) {
2348 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2349 (*port_fwd_end)(fwd_ports_ids[i]);
2352 launch_packet_forwarding(start_pkt_forward_on_core);
2356 stop_packet_forwarding(void)
2358 port_fwd_end_t port_fwd_end;
2364 fprintf(stderr, "Packet forwarding not started\n");
2367 printf("Telling cores to stop...");
2368 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2369 fwd_lcores[lc_id]->stopped = 1;
2370 printf("\nWaiting for lcores to finish...\n");
2371 rte_eal_mp_wait_lcore();
2372 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2373 if (port_fwd_end != NULL) {
2374 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2375 pt_id = fwd_ports_ids[i];
2376 (*port_fwd_end)(pt_id);
2380 fwd_stats_display();
2382 printf("\nDone.\n");
2387 dev_set_link_up(portid_t pid)
2389 if (rte_eth_dev_set_link_up(pid) < 0)
2390 fprintf(stderr, "\nSet link up fail.\n");
2394 dev_set_link_down(portid_t pid)
2396 if (rte_eth_dev_set_link_down(pid) < 0)
2397 fprintf(stderr, "\nSet link down fail.\n");
2401 all_ports_started(void)
2404 struct rte_port *port;
2406 RTE_ETH_FOREACH_DEV(pi) {
2408 /* Check if there is a port which is not started */
2409 if ((port->port_status != RTE_PORT_STARTED) &&
2410 (port->slave_flag == 0))
2414 /* All ports are started */
2419 port_is_stopped(portid_t port_id)
2421 struct rte_port *port = &ports[port_id];
2423 if ((port->port_status != RTE_PORT_STOPPED) &&
2424 (port->slave_flag == 0))
2430 all_ports_stopped(void)
2434 RTE_ETH_FOREACH_DEV(pi) {
2435 if (!port_is_stopped(pi))
2443 port_is_started(portid_t port_id)
2445 if (port_id_is_invalid(port_id, ENABLED_WARN))
2448 if (ports[port_id].port_status != RTE_PORT_STARTED)
2454 /* Configure the Rx and Tx hairpin queues for the selected port. */
2456 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2459 struct rte_eth_hairpin_conf hairpin_conf = {
2464 struct rte_port *port = &ports[pi];
2465 uint16_t peer_rx_port = pi;
2466 uint16_t peer_tx_port = pi;
2467 uint32_t manual = 1;
2468 uint32_t tx_exp = hairpin_mode & 0x10;
2470 if (!(hairpin_mode & 0xf)) {
2474 } else if (hairpin_mode & 0x1) {
2475 peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2476 RTE_ETH_DEV_NO_OWNER);
2477 if (peer_tx_port >= RTE_MAX_ETHPORTS)
2478 peer_tx_port = rte_eth_find_next_owned_by(0,
2479 RTE_ETH_DEV_NO_OWNER);
2480 if (p_pi != RTE_MAX_ETHPORTS) {
2481 peer_rx_port = p_pi;
2485 /* Last port will be the peer RX port of the first. */
2486 RTE_ETH_FOREACH_DEV(next_pi)
2487 peer_rx_port = next_pi;
2490 } else if (hairpin_mode & 0x2) {
2492 peer_rx_port = p_pi;
2494 peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2495 RTE_ETH_DEV_NO_OWNER);
2496 if (peer_rx_port >= RTE_MAX_ETHPORTS)
2499 peer_tx_port = peer_rx_port;
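/* Set up the Tx hairpin queues; each is peered with an Rx queue on peer_rx_port. */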
2503 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2504 hairpin_conf.peers[0].port = peer_rx_port;
2505 hairpin_conf.peers[0].queue = i + nb_rxq;
2506 hairpin_conf.manual_bind = !!manual;
2507 hairpin_conf.tx_explicit = !!tx_exp;
2508 diag = rte_eth_tx_hairpin_queue_setup
2509 (pi, qi, nb_txd, &hairpin_conf);
2514 /* Failed to set up Tx hairpin queue, return */
2515 if (rte_atomic16_cmpset(&(port->port_status),
2517 RTE_PORT_STOPPED) == 0)
2519 "Port %d can not be set back to stopped\n", pi);
2520 fprintf(stderr, "Fail to configure port %d hairpin queues\n",
2522 /* try to reconfigure queues next time */
2523 port->need_reconfig_queues = 1;
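/* Set up the Rx hairpin queues; each is peered with a Tx queue on peer_tx_port. */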
2526 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2527 hairpin_conf.peers[0].port = peer_tx_port;
2528 hairpin_conf.peers[0].queue = i + nb_txq;
2529 hairpin_conf.manual_bind = !!manual;
2530 hairpin_conf.tx_explicit = !!tx_exp;
2531 diag = rte_eth_rx_hairpin_queue_setup
2532 (pi, qi, nb_rxd, &hairpin_conf);
2537 /* Failed to set up Rx hairpin queue, return */
2538 if (rte_atomic16_cmpset(&(port->port_status),
2540 RTE_PORT_STOPPED) == 0)
2542 "Port %d can not be set back to stopped\n", pi);
2543 fprintf(stderr, "Fail to configure port %d hairpin queues\n",
2545 /* try to reconfigure queues next time */
2546 port->need_reconfig_queues = 1;
2552 /* Configure an Rx queue, optionally splitting received packets into multiple mbuf segments. */
2554 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2555 uint16_t nb_rx_desc, unsigned int socket_id,
2556 struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2558 union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2559 unsigned int i, mp_n;
2562 if (rx_pkt_nb_segs <= 1 ||
2563 (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2564 rx_conf->rx_seg = NULL;
2565 rx_conf->rx_nseg = 0;
2566 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2567 nb_rx_desc, socket_id,
2571 for (i = 0; i < rx_pkt_nb_segs; i++) {
2572 struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2573 struct rte_mempool *mpx;
2575 * Use the last valid pool for segments whose index
2576 * exceeds the number of configured pools.
2578 mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2579 mpx = mbuf_pool_find(socket_id, mp_n);
2580 /* Handle zero as mbuf data buffer size. */
2581 rx_seg->length = rx_pkt_seg_lengths[i] ?
2582 rx_pkt_seg_lengths[i] :
2583 mbuf_data_size[mp_n];
2584 rx_seg->offset = i < rx_pkt_nb_offs ?
2585 rx_pkt_seg_offsets[i] : 0;
2586 rx_seg->mp = mpx ? mpx : mp;
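/* Pass the per-segment descriptions to the PMD for this setup call only;
 * rx_seg/rx_nseg are cleared again below so later setups start clean.
 */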
2588 rx_conf->rx_nseg = rx_pkt_nb_segs;
2589 rx_conf->rx_seg = rx_useg;
2590 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2591 socket_id, rx_conf, NULL);
2592 rx_conf->rx_seg = NULL;
2593 rx_conf->rx_nseg = 0;
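/*
 * Allocate the per-port arrays used to track and display the extended
 * statistics selected for display (supported ids plus previous and
 * current value snapshots).
 */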
2598 alloc_xstats_display_info(portid_t pi)
2600 uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
2601 uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
2602 uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
2604 if (xstats_display_num == 0)
2607 *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
2608 if (*ids_supp == NULL)
2611 *prev_values = calloc(xstats_display_num,
2612 sizeof(**prev_values));
2613 if (*prev_values == NULL)
2614 goto fail_prev_values;
2616 *curr_values = calloc(xstats_display_num,
2617 sizeof(**curr_values));
2618 if (*curr_values == NULL)
2619 goto fail_curr_values;
2621 ports[pi].xstats_info.allocated = true;
2634 free_xstats_display_info(portid_t pi)
2636 if (!ports[pi].xstats_info.allocated)
2638 free(ports[pi].xstats_info.ids_supp);
2639 free(ports[pi].xstats_info.prev_values);
2640 free(ports[pi].xstats_info.curr_values);
2641 ports[pi].xstats_info.allocated = false;
2644 /** Fill helper structures for specified port to show extended statistics. */
2646 fill_xstats_display_info_for_port(portid_t pi)
2648 unsigned int stat, stat_supp;
2649 const char *xstat_name;
2650 struct rte_port *port;
2654 if (xstats_display_num == 0)
2657 if (pi == (portid_t)RTE_PORT_ALL) {
2658 fill_xstats_display_info();
2663 if (port->port_status != RTE_PORT_STARTED)
2666 if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
2667 rte_exit(EXIT_FAILURE,
2668 "Failed to allocate xstats display memory\n");
2670 ids_supp = port->xstats_info.ids_supp;
2671 for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
2672 xstat_name = xstats_display[stat].name;
2673 rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
2674 ids_supp + stat_supp);
2676 fprintf(stderr, "No xstat '%s' on port %u - skip it (index %u)\n",
2677 xstat_name, pi, stat);
2683 port->xstats_info.ids_supp_sz = stat_supp;
2686 /** Fill helper structures for all ports to show extended statistics. */
2688 fill_xstats_display_info(void)
2692 if (xstats_display_num == 0)
2695 RTE_ETH_FOREACH_DEV(pi)
2696 fill_xstats_display_info_for_port(pi);
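/*
 * Configure and start the given port, or all ports when pid is RTE_PORT_ALL:
 * (re)apply the device configuration, set up the Rx/Tx and hairpin queues,
 * start the device and optionally bind hairpin peers. Returns 0 on success.
 *
 * Minimal usage sketch (illustrative only, mirroring the call in main()):
 *
 *     if (start_port(RTE_PORT_ALL) != 0)
 *         fprintf(stderr, "Start ports failed\n");
 */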
2700 start_port(portid_t pid)
2702 int diag, need_check_link_status = -1;
2704 portid_t p_pi = RTE_MAX_ETHPORTS;
2705 portid_t pl[RTE_MAX_ETHPORTS];
2706 portid_t peer_pl[RTE_MAX_ETHPORTS];
2707 uint16_t cnt_pi = 0;
2708 uint16_t cfg_pi = 0;
2711 struct rte_port *port;
2712 struct rte_eth_hairpin_cap cap;
2714 if (port_id_is_invalid(pid, ENABLED_WARN))
2717 RTE_ETH_FOREACH_DEV(pi) {
2718 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2721 need_check_link_status = 0;
2723 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2724 RTE_PORT_HANDLING) == 0) {
2725 fprintf(stderr, "Port %d is not stopped, skipping it\n", pi);
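/* Re-apply the full device configuration if the port was flagged for it. */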
2729 if (port->need_reconfig > 0) {
2730 struct rte_eth_conf dev_conf;
2733 port->need_reconfig = 0;
2735 if (flow_isolate_all) {
2736 int ret = port_flow_isolate(pi, 1);
2739 "Failed to apply isolated mode on port %d\n",
2744 configure_rxtx_dump_callbacks(0);
2745 printf("Configuring Port %d (socket %u)\n", pi,
2747 if (nb_hairpinq > 0 &&
2748 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2750 "Port %d doesn't support hairpin queues\n",
2755 /* configure port */
2756 diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
2757 nb_txq + nb_hairpinq,
2760 if (rte_atomic16_cmpset(&(port->port_status),
2761 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2763 "Port %d can not be set back to stopped\n",
2765 fprintf(stderr, "Fail to configure port %d\n",
2767 /* try to reconfigure port next time */
2768 port->need_reconfig = 1;
2771 /* Get the device configuration */
2773 eth_dev_conf_get_print_err(pi, &dev_conf)) {
2775 "Cannot get device configuration for port %d\n",
2779 /* Apply Rx offloads configuration */
2780 if (dev_conf.rxmode.offloads !=
2781 port->dev_conf.rxmode.offloads) {
2782 port->dev_conf.rxmode.offloads |=
2783 dev_conf.rxmode.offloads;
2785 k < port->dev_info.max_rx_queues;
2787 port->rx_conf[k].offloads |=
2788 dev_conf.rxmode.offloads;
2790 /* Apply Tx offloads configuration */
2791 if (dev_conf.txmode.offloads !=
2792 port->dev_conf.txmode.offloads) {
2793 port->dev_conf.txmode.offloads |=
2794 dev_conf.txmode.offloads;
2796 k < port->dev_info.max_tx_queues;
2798 port->tx_conf[k].offloads |=
2799 dev_conf.txmode.offloads;
2802 if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2803 port->need_reconfig_queues = 0;
2804 /* Set up Tx queues */
2805 for (qi = 0; qi < nb_txq; qi++) {
2806 if ((numa_support) &&
2807 (txring_numa[pi] != NUMA_NO_CONFIG))
2808 diag = rte_eth_tx_queue_setup(pi, qi,
2809 port->nb_tx_desc[qi],
2811 &(port->tx_conf[qi]));
2813 diag = rte_eth_tx_queue_setup(pi, qi,
2814 port->nb_tx_desc[qi],
2816 &(port->tx_conf[qi]));
2821 /* Failed to set up Tx queue, return */
2822 if (rte_atomic16_cmpset(&(port->port_status),
2824 RTE_PORT_STOPPED) == 0)
2826 "Port %d can not be set back to stopped\n",
2829 "Fail to configure port %d tx queues\n",
2831 /* try to reconfigure queues next time */
2832 port->need_reconfig_queues = 1;
2835 for (qi = 0; qi < nb_rxq; qi++) {
2836 /* Set up Rx queues */
2837 if ((numa_support) &&
2838 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2839 struct rte_mempool * mp =
2841 (rxring_numa[pi], 0);
2844 "Failed to set up Rx queue: no mempool allocated on socket %d\n",
2849 diag = rx_queue_setup(pi, qi,
2850 port->nb_rx_desc[qi],
2852 &(port->rx_conf[qi]),
2855 struct rte_mempool *mp =
2857 (port->socket_id, 0);
2860 "Failed to set up Rx queue: no mempool allocated on socket %d\n",
2864 diag = rx_queue_setup(pi, qi,
2865 port->nb_rx_desc[qi],
2867 &(port->rx_conf[qi]),
2873 /* Failed to set up Rx queue, return */
2874 if (rte_atomic16_cmpset(&(port->port_status),
2876 RTE_PORT_STOPPED) == 0)
2878 "Port %d can not be set back to stopped\n",
2881 "Fail to configure port %d rx queues\n",
2883 /* try to reconfigure queues next time */
2884 port->need_reconfig_queues = 1;
2887 /* Set up hairpin queues */
2888 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
2891 configure_rxtx_dump_callbacks(verbose_level);
2893 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2897 "Port %d: Failed to disable Ptype parsing\n",
2905 diag = eth_dev_start_mp(pi);
2907 fprintf(stderr, "Fail to start port %d: %s\n",
2908 pi, rte_strerror(-diag));
2910 /* Failed to start the port, roll back to stopped */
2911 if (rte_atomic16_cmpset(&(port->port_status),
2912 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2914 "Port %d can not be set back to stopped\n",
2919 if (rte_atomic16_cmpset(&(port->port_status),
2920 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2921 fprintf(stderr, "Port %d can not be set into started\n",
2924 if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
2925 printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
2926 RTE_ETHER_ADDR_BYTES(&port->eth_addr));
2928 /* At least one port was started, so the link status needs to be checked */
2929 need_check_link_status = 1;
2934 if (need_check_link_status == 1 && !no_link_check)
2935 check_all_ports_link_status(RTE_PORT_ALL);
2936 else if (need_check_link_status == 0)
2937 fprintf(stderr, "Please stop the ports first\n");
2939 if (hairpin_mode & 0xf) {
2943 /* bind all started hairpin ports */
2944 for (i = 0; i < cfg_pi; i++) {
2946 /* bind current Tx to all peer Rx */
2947 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2948 RTE_MAX_ETHPORTS, 1);
2951 for (j = 0; j < peer_pi; j++) {
2952 if (!port_is_started(peer_pl[j]))
2954 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2957 "Error during binding hairpin Tx port %u to %u: %s\n",
2959 rte_strerror(-diag));
2963 /* bind all peer Tx to current Rx */
2964 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2965 RTE_MAX_ETHPORTS, 0);
2968 for (j = 0; j < peer_pi; j++) {
2969 if (!port_is_started(peer_pl[j]))
2971 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
2974 "Error during binding hairpin Tx port %u to %u: %s\n",
2976 rte_strerror(-diag));
2983 fill_xstats_display_info_for_port(pid);
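/*
 * Stop the given port, or all ports when pid is RTE_PORT_ALL: unbind hairpin
 * peers if needed, flush its flow rules, stop the device and mark the port
 * as stopped.
 */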
2990 stop_port(portid_t pid)
2993 struct rte_port *port;
2994 int need_check_link_status = 0;
2995 portid_t peer_pl[RTE_MAX_ETHPORTS];
2998 if (port_id_is_invalid(pid, ENABLED_WARN))
3001 printf("Stopping ports...\n");
3003 RTE_ETH_FOREACH_DEV(pi) {
3004 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3007 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3009 "Please remove port %d from forwarding configuration.\n",
3014 if (port_is_bonding_slave(pi)) {
3016 "Please remove port %d from bonded device.\n",
3022 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
3023 RTE_PORT_HANDLING) == 0)
3026 if (hairpin_mode & 0xf) {
3029 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
3030 /* unbind all peer Tx from current Rx */
3031 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3032 RTE_MAX_ETHPORTS, 0);
3035 for (j = 0; j < peer_pi; j++) {
3036 if (!port_is_started(peer_pl[j]))
3038 rte_eth_hairpin_unbind(peer_pl[j], pi);
3042 if (port->flow_list)
3043 port_flow_flush(pi);
3045 if (eth_dev_stop_mp(pi) != 0)
3046 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3049 if (rte_atomic16_cmpset(&(port->port_status),
3050 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
3051 fprintf(stderr, "Port %d can not be set into stopped\n",
3053 need_check_link_status = 1;
3055 if (need_check_link_status && !no_link_check)
3056 check_all_ports_link_status(RTE_PORT_ALL);
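/*
 * Compact a port-id array in place, keeping only ids that are still valid,
 * and update the element count accordingly.
 */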
3062 remove_invalid_ports_in(portid_t *array, portid_t *total)
3065 portid_t new_total = 0;
3067 for (i = 0; i < *total; i++)
3068 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
3069 array[new_total] = array[i];
3076 remove_invalid_ports(void)
3078 remove_invalid_ports_in(ports_ids, &nb_ports);
3079 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
3080 nb_cfg_ports = nb_fwd_ports;
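/*
 * Close the given port, or all ports when pid is RTE_PORT_ALL: flush flow
 * rules and flex items, close the device and drop the port from the lists
 * of valid ports.
 */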
3084 close_port(portid_t pid)
3087 struct rte_port *port;
3089 if (port_id_is_invalid(pid, ENABLED_WARN))
3092 printf("Closing ports...\n");
3094 RTE_ETH_FOREACH_DEV(pi) {
3095 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3098 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3100 "Please remove port %d from forwarding configuration.\n",
3105 if (port_is_bonding_slave(pi)) {
3107 "Please remove port %d from bonded device.\n",
3113 if (rte_atomic16_cmpset(&(port->port_status),
3114 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
3115 fprintf(stderr, "Port %d is already closed\n", pi);
3119 if (is_proc_primary()) {
3120 port_flow_flush(pi);
3121 port_flex_item_flush(pi);
3122 rte_eth_dev_close(pi);
3125 free_xstats_display_info(pi);
3128 remove_invalid_ports();
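/*
 * Reset the given port, or all ports when pid is RTE_PORT_ALL. Ports must be
 * stopped first; a successful reset marks the port for reconfiguration.
 */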
3133 reset_port(portid_t pid)
3137 struct rte_port *port;
3139 if (port_id_is_invalid(pid, ENABLED_WARN))
3142 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
3143 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
3145 "Can not reset port(s), please stop port(s) first.\n");
3149 printf("Resetting ports...\n");
3151 RTE_ETH_FOREACH_DEV(pi) {
3152 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3155 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3157 "Please remove port %d from forwarding configuration.\n",
3162 if (port_is_bonding_slave(pi)) {
3164 "Please remove port %d from bonded device.\n",
3169 diag = rte_eth_dev_reset(pi);
3172 port->need_reconfig = 1;
3173 port->need_reconfig_queues = 1;
3175 fprintf(stderr, "Failed to reset port %d. diag=%d\n",
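/*
 * Attach a new device described by a devargs identifier and set up any
 * ports it exposes, either from the RTE_ETH_EVENT_NEW handler or by
 * iterating over the matching ports.
 */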
3184 attach_port(char *identifier)
3187 struct rte_dev_iterator iterator;
3189 printf("Attaching a new port...\n");
3191 if (identifier == NULL) {
3192 fprintf(stderr, "Invalid parameters are specified\n");
3196 if (rte_dev_probe(identifier) < 0) {
3197 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3201 /* first attach mode: event */
3202 if (setup_on_probe_event) {
3203 /* new ports are detected on RTE_ETH_EVENT_NEW event */
3204 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
3205 if (ports[pi].port_status == RTE_PORT_HANDLING &&
3206 ports[pi].need_setup != 0)
3207 setup_attached_port(pi);
3211 /* second attach mode: iterator */
3212 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
3213 /* setup ports matching the devargs used for probing */
3214 if (port_is_forwarding(pi))
3215 continue; /* port was already attached before */
3216 setup_attached_port(pi);
3221 setup_attached_port(portid_t pi)
3223 unsigned int socket_id;
3226 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
3227 /* if socket_id is invalid, set to the first available socket. */
3228 if (check_socket_id(socket_id) < 0)
3229 socket_id = socket_ids[0];
3230 reconfig(pi, socket_id);
3231 ret = rte_eth_promiscuous_enable(pi);
3234 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
3235 pi, rte_strerror(-ret));
3237 ports_ids[nb_ports++] = pi;
3238 fwd_ports_ids[nb_fwd_ports++] = pi;
3239 nb_cfg_ports = nb_fwd_ports;
3240 ports[pi].need_setup = 0;
3241 ports[pi].port_status = RTE_PORT_STOPPED;
3243 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
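/*
 * Detach a whole rte_device: flush flow rules on each of its sibling ports
 * that is not yet closed, remove the device and drop the now-invalid ports.
 */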
3248 detach_device(struct rte_device *dev)
3253 fprintf(stderr, "Device already removed\n");
3257 printf("Removing a device...\n");
3259 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3260 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3261 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3262 fprintf(stderr, "Port %u not stopped\n",
3266 port_flow_flush(sibling);
3270 if (rte_dev_remove(dev) < 0) {
3271 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3274 remove_invalid_ports();
3276 printf("Device is detached\n");
3277 printf("Now total ports is %d\n", nb_ports);
3283 detach_port_device(portid_t port_id)
3286 struct rte_eth_dev_info dev_info;
3288 if (port_id_is_invalid(port_id, ENABLED_WARN))
3291 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3292 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3293 fprintf(stderr, "Port not stopped\n");
3296 fprintf(stderr, "Port was not closed\n");
3299 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3302 "Failed to get device info for port %d, not detaching\n",
3306 detach_device(dev_info.device);
3310 detach_devargs(char *identifier)
3312 struct rte_dev_iterator iterator;
3313 struct rte_devargs da;
3316 printf("Removing a device...\n");
3318 memset(&da, 0, sizeof(da));
3319 if (rte_devargs_parsef(&da, "%s", identifier)) {
3320 fprintf(stderr, "cannot parse identifier\n");
3324 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
3325 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3326 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3327 fprintf(stderr, "Port %u not stopped\n",
3329 rte_eth_iterator_cleanup(&iterator);
3330 rte_devargs_reset(&da);
3333 port_flow_flush(port_id);
3337 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3338 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3339 da.name, da.bus->name);
3340 rte_devargs_reset(&da);
3344 remove_invalid_ports();
3346 printf("Device %s is detached\n", identifier);
3347 printf("Now total ports is %d\n", nb_ports);
3349 rte_devargs_reset(&da);
3360 stop_packet_forwarding();
3362 #ifndef RTE_EXEC_ENV_WINDOWS
3363 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3365 if (mp_alloc_type == MP_ALLOC_ANON)
3366 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3371 if (ports != NULL) {
3373 RTE_ETH_FOREACH_DEV(pt_id) {
3374 printf("\nStopping port %d...\n", pt_id);
3378 RTE_ETH_FOREACH_DEV(pt_id) {
3379 printf("\nShutting down port %d...\n", pt_id);
3386 ret = rte_dev_event_monitor_stop();
3389 "failed to stop device event monitor.");
3393 ret = rte_dev_event_callback_unregister(NULL,
3394 dev_event_callback, NULL);
3397 "failed to unregister device event callback.\n");
3401 ret = rte_dev_hotplug_handle_disable();
3404 "failed to disable hotplug handling.\n");
3408 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3410 mempool_free_mp(mempools[i]);
3412 free(xstats_display);
3414 printf("\nBye...\n");
3417 typedef void (*cmd_func_t)(void);
3418 struct pmd_test_command {
3419 const char *cmd_name;
3420 cmd_func_t cmd_func;
3423 /* Check the link status of all ports for up to 9 seconds, then print the final status */
3425 check_all_ports_link_status(uint32_t port_mask)
3427 #define CHECK_INTERVAL 100 /* 100ms */
3428 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3430 uint8_t count, all_ports_up, print_flag = 0;
3431 struct rte_eth_link link;
3433 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3435 printf("Checking link statuses...\n");
3437 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3439 RTE_ETH_FOREACH_DEV(portid) {
3440 if ((port_mask & (1 << portid)) == 0)
3442 memset(&link, 0, sizeof(link));
3443 ret = rte_eth_link_get_nowait(portid, &link);
3446 if (print_flag == 1)
3448 "Port %u link get failed: %s\n",
3449 portid, rte_strerror(-ret));
3452 /* print link status if flag set */
3453 if (print_flag == 1) {
3454 rte_eth_link_to_str(link_status,
3455 sizeof(link_status), &link);
3456 printf("Port %d %s\n", portid, link_status);
3459 /* clear all_ports_up flag if any link down */
3460 if (link.link_status == ETH_LINK_DOWN) {
3465 /* once the final link status has been printed, stop polling */
3466 if (print_flag == 1)
3469 if (all_ports_up == 0) {
3471 rte_delay_ms(CHECK_INTERVAL);
3474 /* set print_flag when all ports are up or the timeout expires */
3475 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
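/*
 * Deferred-removal alarm callback: stop forwarding if the removed port was
 * in use, close the port, detach its device and restart forwarding when it
 * had to be stopped here.
 */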
3485 rmv_port_callback(void *arg)
3487 int need_to_start = 0;
3488 int org_no_link_check = no_link_check;
3489 portid_t port_id = (intptr_t)arg;
3490 struct rte_eth_dev_info dev_info;
3493 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3495 if (!test_done && port_is_forwarding(port_id)) {
3497 stop_packet_forwarding();
3501 no_link_check = org_no_link_check;
3503 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3506 "Failed to get device info for port %d, not detaching\n",
3509 struct rte_device *device = dev_info.device;
3510 close_port(port_id);
3511 detach_device(device); /* might be already removed or have more ports */
3514 start_packet_forwarding(0);
3517 /* This function is used by the interrupt thread */
3519 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3522 RTE_SET_USED(param);
3523 RTE_SET_USED(ret_param);
3525 if (type >= RTE_ETH_EVENT_MAX) {
3527 "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3528 port_id, __func__, type);
3530 } else if (event_print_mask & (UINT32_C(1) << type)) {
3531 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3532 eth_event_desc[type]);
3537 case RTE_ETH_EVENT_NEW:
3538 ports[port_id].need_setup = 1;
3539 ports[port_id].port_status = RTE_PORT_HANDLING;
3541 case RTE_ETH_EVENT_INTR_RMV:
3542 if (port_id_is_invalid(port_id, DISABLED_WARN))
3544 if (rte_eal_alarm_set(100000,
3545 rmv_port_callback, (void *)(intptr_t)port_id))
3547 "Could not set up deferred device removal\n");
3549 case RTE_ETH_EVENT_DESTROY:
3550 ports[port_id].port_status = RTE_PORT_CLOSED;
3551 printf("Port %u is closed\n", port_id);
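/* Register eth_event_callback for every ethdev event type on all ports. */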
3560 register_eth_event_callback(void)
3563 enum rte_eth_event_type event;
3565 for (event = RTE_ETH_EVENT_UNKNOWN;
3566 event < RTE_ETH_EVENT_MAX; event++) {
3567 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3572 TESTPMD_LOG(ERR, "Failed to register callback for "
3573 "%s event\n", eth_event_desc[event]);
3581 /* This function is used by the interrupt thread */
3583 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3584 __rte_unused void *arg)
3589 if (type >= RTE_DEV_EVENT_MAX) {
3590 fprintf(stderr, "%s called upon invalid event %d\n",
3596 case RTE_DEV_EVENT_REMOVE:
3597 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3599 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3601 RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
3606 * Because the user's callback is invoked from the EAL interrupt
3607 * callback, the interrupt callback must be finished before it can
3608 * be unregistered when detaching the device. So return from this
3609 * callback quickly and detach the device through a deferred removal
3610 * instead. This is a workaround: once device detaching is moved
3611 * into the EAL, the deferred removal can be dropped.
3614 if (rte_eal_alarm_set(100000,
3615 rmv_port_callback, (void *)(intptr_t)port_id))
3617 "Could not set up deferred device removal\n");
3619 case RTE_DEV_EVENT_ADD:
3620 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3622 /* TODO: after kernel driver binding finishes,
3623 * begin to attach the port.
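/*
 * Initialize the per-queue Rx/Tx configuration from the driver defaults in
 * dev_info, then override the thresholds that were given on the command line.
 */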
3632 rxtx_port_config(struct rte_port *port)
3637 for (qid = 0; qid < nb_rxq; qid++) {
3638 offloads = port->rx_conf[qid].offloads;
3639 port->rx_conf[qid] = port->dev_info.default_rxconf;
3641 port->rx_conf[qid].offloads = offloads;
3643 /* Check if any Rx parameters have been passed */
3644 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3645 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3647 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3648 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3650 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3651 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3653 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3654 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3656 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3657 port->rx_conf[qid].rx_drop_en = rx_drop_en;
3659 port->nb_rx_desc[qid] = nb_rxd;
3662 for (qid = 0; qid < nb_txq; qid++) {
3663 offloads = port->tx_conf[qid].offloads;
3664 port->tx_conf[qid] = port->dev_info.default_txconf;
3666 port->tx_conf[qid].offloads = offloads;
3668 /* Check if any Tx parameters have been passed */
3669 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3670 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3672 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3673 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3675 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3676 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3678 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3679 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3681 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3682 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3684 port->nb_tx_desc[qid] = nb_txd;
3689 * Helper function to set the MTU from a maximum Rx frame size.
3691 * port->dev_info should be set before calling this function.
3693 * Return 0 on success, negative on error.
3696 update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
3698 struct rte_port *port = &ports[portid];
3699 uint32_t eth_overhead;
3700 uint16_t mtu, new_mtu;
3702 eth_overhead = get_eth_overhead(&port->dev_info);
3704 if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
3705 printf("Failed to get MTU for port %u\n", portid);
3709 new_mtu = max_rx_pktlen - eth_overhead;
3714 if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
3716 "Failed to set MTU to %u for port %u\n",
3721 port->dev_conf.rxmode.mtu = new_mtu;
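/*
 * Set the initial configuration of every probed port: flow director and RSS
 * settings, multi-queue mode, per-queue config, MAC address and the LSC/RMV
 * interrupt flags.
 */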
3727 init_port_config(void)
3730 struct rte_port *port;
3733 RTE_ETH_FOREACH_DEV(pid) {
3735 port->dev_conf.fdir_conf = fdir_conf;
3737 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3742 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3743 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3744 rss_hf & port->dev_info.flow_type_rss_offloads;
3746 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3747 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3750 if (port->dcb_flag == 0) {
3751 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
3752 port->dev_conf.rxmode.mq_mode =
3753 (enum rte_eth_rx_mq_mode)
3754 (rx_mq_mode & ETH_MQ_RX_RSS);
3756 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3757 port->dev_conf.rxmode.offloads &=
3758 ~DEV_RX_OFFLOAD_RSS_HASH;
3761 i < port->dev_info.nb_rx_queues;
3763 port->rx_conf[i].offloads &=
3764 ~DEV_RX_OFFLOAD_RSS_HASH;
3768 rxtx_port_config(port);
3770 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3774 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3775 rte_pmd_ixgbe_bypass_init(pid);
3778 if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
3779 port->dev_conf.intr_conf.lsc = 1;
3780 if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3781 port->dev_conf.intr_conf.rmv = 1;
3785 void set_port_slave_flag(portid_t slave_pid)
3787 struct rte_port *port;
3789 port = &ports[slave_pid];
3790 port->slave_flag = 1;
3793 void clear_port_slave_flag(portid_t slave_pid)
3795 struct rte_port *port;
3797 port = &ports[slave_pid];
3798 port->slave_flag = 0;
3801 uint8_t port_is_bonding_slave(portid_t slave_pid)
3803 struct rte_port *port;
3804 struct rte_eth_dev_info dev_info;
3807 port = &ports[slave_pid];
3808 ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
3811 "Failed to get device info for port id %d, "
3812 "cannot determine if the port is a bonded slave",
3816 if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3821 const uint16_t vlan_tags[] = {
3822 0, 1, 2, 3, 4, 5, 6, 7,
3823 8, 9, 10, 11, 12, 13, 14, 15,
3824 16, 17, 18, 19, 20, 21, 22, 23,
3825 24, 25, 26, 27, 28, 29, 30, 31
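/*
 * Build an rte_eth_conf for DCB operation: either VMDq+DCB pools mapped to
 * the VLAN tags above (VT mode), or plain DCB combined with RSS, for the
 * requested number of traffic classes.
 */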
3829 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3830 enum dcb_mode_enable dcb_mode,
3831 enum rte_eth_nb_tcs num_tcs,
3836 struct rte_eth_rss_conf rss_conf;
3839 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3840 * given above, and the number of traffic classes available for use.
3842 if (dcb_mode == DCB_VT_ENABLED) {
3843 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3844 ð_conf->rx_adv_conf.vmdq_dcb_conf;
3845 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3846 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3848 /* VMDQ+DCB RX and TX configurations */
3849 vmdq_rx_conf->enable_default_pool = 0;
3850 vmdq_rx_conf->default_pool = 0;
3851 vmdq_rx_conf->nb_queue_pools =
3852 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3853 vmdq_tx_conf->nb_queue_pools =
3854 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3856 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3857 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3858 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3859 vmdq_rx_conf->pool_map[i].pools =
3860 1 << (i % vmdq_rx_conf->nb_queue_pools);
3862 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3863 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3864 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3867 /* Set the Rx and Tx multi-queue mode to the VMDq+DCB variants */
3868 eth_conf->rxmode.mq_mode =
3869 (enum rte_eth_rx_mq_mode)
3870 (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3871 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3873 struct rte_eth_dcb_rx_conf *rx_conf =
3874 ð_conf->rx_adv_conf.dcb_rx_conf;
3875 struct rte_eth_dcb_tx_conf *tx_conf =
3876 ð_conf->tx_adv_conf.dcb_tx_conf;
3878 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3880 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3884 rx_conf->nb_tcs = num_tcs;
3885 tx_conf->nb_tcs = num_tcs;
3887 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3888 rx_conf->dcb_tc[i] = i % num_tcs;
3889 tx_conf->dcb_tc[i] = i % num_tcs;
3892 eth_conf->rxmode.mq_mode =
3893 (enum rte_eth_rx_mq_mode)
3894 (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3895 eth_conf->rx_adv_conf.rss_conf = rss_conf;
3896 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3900 eth_conf->dcb_capability_en =
3901 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3903 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
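/*
 * Apply a DCB configuration to a port: build the configuration, reconfigure
 * the device and adjust the global Rx/Tx queue counts to match the number of
 * traffic classes.
 *
 * Illustrative call (sketch only; the actual invocation lives in the
 * command-line handling code):
 *
 *     if (init_port_dcb_config(pid, DCB_ENABLED, ETH_4_TCS, pfc_en) != 0)
 *         fprintf(stderr, "Cannot init DCB on port %u\n", pid);
 */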
3909 init_port_dcb_config(portid_t pid,
3910 enum dcb_mode_enable dcb_mode,
3911 enum rte_eth_nb_tcs num_tcs,
3914 struct rte_eth_conf port_conf;
3915 struct rte_port *rte_port;
3919 if (num_procs > 1) {
3920 printf("The multi-process feature doesn't support dcb.\n");
3923 rte_port = &ports[pid];
3925 /* retain the original device configuration. */
3926 memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
3928 /* Set configuration of DCB in VT mode and DCB in non-VT mode */
3929 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3932 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3934 /* Re-configure the device. */
3935 retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
3939 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3943 /* If dev_info.vmdq_pool_base is greater than 0,
3944 * the queue IDs of the VMDq pools start after the PF queues.
3946 if (dcb_mode == DCB_VT_ENABLED &&
3947 rte_port->dev_info.vmdq_pool_base > 0) {
3949 "VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
3954 /* Assume the ports in testpmd have the same DCB capability
3955 * and the same number of Rx and Tx queues in DCB mode
3957 if (dcb_mode == DCB_VT_ENABLED) {
3958 if (rte_port->dev_info.max_vfs > 0) {
3959 nb_rxq = rte_port->dev_info.nb_rx_queues;
3960 nb_txq = rte_port->dev_info.nb_tx_queues;
3962 nb_rxq = rte_port->dev_info.max_rx_queues;
3963 nb_txq = rte_port->dev_info.max_tx_queues;
3966 /* If VT is disabled, use all PF queues */
3967 if (rte_port->dev_info.vmdq_pool_base == 0) {
3968 nb_rxq = rte_port->dev_info.max_rx_queues;
3969 nb_txq = rte_port->dev_info.max_tx_queues;
3971 nb_rxq = (queueid_t)num_tcs;
3972 nb_txq = (queueid_t)num_tcs;
3976 rx_free_thresh = 64;
3978 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3980 rxtx_port_config(rte_port);
3982 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3983 for (i = 0; i < RTE_DIM(vlan_tags); i++)
3984 rx_vft_set(pid, vlan_tags[i], 1);
3986 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3990 rte_port->dcb_flag = 1;
3992 /* Enter DCB configuration status */
4003 /* Configuration of Ethernet ports. */
4004 ports = rte_zmalloc("testpmd: ports",
4005 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4006 RTE_CACHE_LINE_SIZE);
4007 if (ports == NULL) {
4008 rte_exit(EXIT_FAILURE,
4009 "rte_zmalloc(%d struct rte_port) failed\n",
4012 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4013 ports[i].xstats_info.allocated = false;
4014 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4015 LIST_INIT(&ports[i].flow_tunnel_list);
4016 /* Initialize ports NUMA structures */
4017 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4018 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4019 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4033 const char clr[] = { 27, '[', '2', 'J', '\0' };
4034 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4036 /* Clear screen and move to top left */
4037 printf("%s%s", clr, top_left);
4039 printf("\nPort statistics ====================================");
4040 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4041 nic_stats_display(fwd_ports_ids[i]);
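/*
 * SIGINT/SIGTERM handler: tear down the optional pdump and latencystats
 * subsystems, flag forced termination and re-raise the signal with the
 * default handler so the exit status reflects it.
 */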
4047 signal_handler(int signum)
4049 if (signum == SIGINT || signum == SIGTERM) {
4050 fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4052 #ifdef RTE_LIB_PDUMP
4053 /* uninitialize packet capture framework */
4056 #ifdef RTE_LIB_LATENCYSTATS
4057 if (latencystats_enabled != 0)
4058 rte_latencystats_uninit();
4061 /* Set flag to indicate forced termination. */
4063 /* exit with the expected status */
4064 #ifndef RTE_EXEC_ENV_WINDOWS
4065 signal(signum, SIG_DFL);
4066 kill(getpid(), signum);
4072 main(int argc, char** argv)
4079 signal(SIGINT, signal_handler);
4080 signal(SIGTERM, signal_handler);
4082 testpmd_logtype = rte_log_register("testpmd");
4083 if (testpmd_logtype < 0)
4084 rte_exit(EXIT_FAILURE, "Cannot register log type");
4085 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4087 diag = rte_eal_init(argc, argv);
4089 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
4090 rte_strerror(rte_errno));
4092 ret = register_eth_event_callback();
4094 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
4096 #ifdef RTE_LIB_PDUMP
4097 /* initialize packet capture framework */
4102 RTE_ETH_FOREACH_DEV(port_id) {
4103 ports_ids[count] = port_id;
4106 nb_ports = (portid_t) count;
4108 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
4110 /* allocate port structures, and init them */
4113 set_def_fwd_config();
4115 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
4116 "Check the core mask argument\n");
4118 /* Bitrate/latency stats disabled by default */
4119 #ifdef RTE_LIB_BITRATESTATS
4120 bitrate_enabled = 0;
4122 #ifdef RTE_LIB_LATENCYSTATS
4123 latencystats_enabled = 0;
4126 /* on FreeBSD, mlockall() is disabled by default */
4127 #ifdef RTE_EXEC_ENV_FREEBSD
4136 launch_args_parse(argc, argv);
4138 #ifndef RTE_EXEC_ENV_WINDOWS
4139 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4140 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
4145 if (tx_first && interactive)
4146 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
4147 "interactive mode.\n");
4149 if (tx_first && lsc_interrupt) {
4151 "Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
4155 if (!nb_rxq && !nb_txq)
4157 "Warning: Either rx or tx queues should be non-zero\n");
4159 if (nb_rxq > 1 && nb_rxq > nb_txq)
4161 "Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent it from being fully tested.\n",
4167 ret = rte_dev_hotplug_handle_enable();
4170 "fail to enable hotplug handling.");
4174 ret = rte_dev_event_monitor_start();
4177 "fail to start device event monitoring.");
4181 ret = rte_dev_event_callback_register(NULL,
4182 dev_event_callback, NULL);
4185 "fail to register device event callback\n");
4190 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
4191 rte_exit(EXIT_FAILURE, "Start ports failed\n");
4193 /* set all ports to promiscuous mode by default */
4194 RTE_ETH_FOREACH_DEV(port_id) {
4195 ret = rte_eth_promiscuous_enable(port_id);
4198 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
4199 port_id, rte_strerror(-ret));
4202 /* Init metrics library */
4203 rte_metrics_init(rte_socket_id());
4205 #ifdef RTE_LIB_LATENCYSTATS
4206 if (latencystats_enabled != 0) {
4207 int ret = rte_latencystats_init(1, NULL);
4210 "Warning: latencystats init() returned error %d\n",
4212 fprintf(stderr, "Latencystats running on lcore %d\n",
4213 latencystats_lcore_id);
4217 /* Setup bitrate stats */
4218 #ifdef RTE_LIB_BITRATESTATS
4219 if (bitrate_enabled != 0) {
4220 bitrate_data = rte_stats_bitrate_create();
4221 if (bitrate_data == NULL)
4222 rte_exit(EXIT_FAILURE,
4223 "Could not allocate bitrate data.\n");
4224 rte_stats_bitrate_reg(bitrate_data);
4227 #ifdef RTE_LIB_CMDLINE
4228 if (strlen(cmdline_filename) != 0)
4229 cmdline_read_from_file(cmdline_filename);
4231 if (interactive == 1) {
4233 printf("Start automatic packet forwarding\n");
4234 start_packet_forwarding(0);
4246 printf("No commandline core given, starting packet forwarding\n");
4247 start_packet_forwarding(tx_first);
4248 if (stats_period != 0) {
4249 uint64_t prev_time = 0, cur_time, diff_time = 0;
4250 uint64_t timer_period;
4252 /* Convert to number of cycles */
4253 timer_period = stats_period * rte_get_timer_hz();
4255 while (f_quit == 0) {
4256 cur_time = rte_get_timer_cycles();
4257 diff_time += cur_time - prev_time;
4259 if (diff_time >= timer_period) {
4261 /* Reset the timer */
4264 /* Sleep to avoid unnecessary checks */
4265 prev_time = cur_time;
4266 rte_delay_us_sleep(US_PER_S);
4270 printf("Press enter to exit\n");
4271 rc = read(0, &c, 1);
4277 ret = rte_eal_cleanup();
4279 rte_exit(EXIT_FAILURE,
4280 "EAL cleanup failed: %s\n", strerror(-ret));
4282 return EXIT_SUCCESS;