1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
12 #ifndef RTE_EXEC_ENV_WINDOWS
15 #include <sys/types.h>
19 #include <sys/queue.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_byteorder.h>
30 #include <rte_debug.h>
31 #include <rte_cycles.h>
32 #include <rte_memory.h>
33 #include <rte_memcpy.h>
34 #include <rte_launch.h>
36 #include <rte_alarm.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
51 #include <rte_pmd_ixgbe.h>
54 #include <rte_pdump.h>
57 #ifdef RTE_LIB_METRICS
58 #include <rte_metrics.h>
60 #ifdef RTE_LIB_BITRATESTATS
61 #include <rte_bitrate.h>
63 #ifdef RTE_LIB_LATENCYSTATS
64 #include <rte_latencystats.h>
66 #ifdef RTE_EXEC_ENV_WINDOWS
70 #include <rte_eth_bond.h>
76 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
77 #define HUGE_FLAG (0x40000)
79 #define HUGE_FLAG MAP_HUGETLB
82 #ifndef MAP_HUGE_SHIFT
83 /* older kernels (or FreeBSD) will not have this define */
84 #define HUGE_SHIFT (26)
86 #define HUGE_SHIFT MAP_HUGE_SHIFT
89 #define EXTMEM_HEAP_NAME "extmem"
91 * Zone size with the malloc overhead (max of debug and release variants)
92 * must fit into the smallest supported hugepage size (2M),
93 * so that an IOVA-contiguous zone of this size can always be allocated
94 * if there are free 2M hugepages.
96 #define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
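/*
 * Worked example (a sketch, assuming the common 64-byte cache line):
 * EXTBUF_ZONE_SIZE = 2097152 - 4 * 64 = 2096896 bytes, which leaves the
 * malloc overhead enough room for the zone to still fit into one 2M page.
 */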
98 uint16_t verbose_level = 0; /**< Silent by default. */
99 int testpmd_logtype; /**< Log type for testpmd logs */
101 /* use main core for command line? */
102 uint8_t interactive = 0;
103 uint8_t auto_start = 0;
105 char cmdline_filename[PATH_MAX] = {0};
108 * NUMA support configuration.
109 * When set, the NUMA support attempts to dispatch the allocation of the
110 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
111 * probed ports among the CPU sockets 0 and 1.
112 * Otherwise, all memory is allocated from CPU socket 0.
114 uint8_t numa_support = 1; /**< numa enabled by default */
117 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
120 uint8_t socket_num = UMA_NO_CONFIG;
123 * Select mempool allocation type:
124 * - native: use regular DPDK memory
125 * - anon: use regular DPDK memory to create mempool, but populate using
126 * anonymous memory (may not be IOVA-contiguous)
127 * - xmem: use externally allocated hugepage memory
129 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
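/*
 * For illustration: the allocation type is normally chosen on the testpmd
 * command line, e.g. "--mp-alloc=anon"; MP_ALLOC_NATIVE is the default
 * when the option is not given.
 */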
132 * Store the socket ids on which the memory pools used by the ports are allocated
135 uint8_t port_numa[RTE_MAX_ETHPORTS];
138 * Store the socket ids on which the Rx rings used by the ports are allocated
141 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
144 * Store the socket ids on which the Tx rings used by the ports are allocated
147 uint8_t txring_numa[RTE_MAX_ETHPORTS];
150 * Record the Ethernet address of peer target ports to which packets are forwarded.
152 * Must be instantiated with the Ethernet addresses of peer traffic generator ports.
155 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
156 portid_t nb_peer_eth_addrs = 0;
159 * Probed Target Environment.
161 struct rte_port *ports; /**< For all probed ethernet ports. */
162 portid_t nb_ports; /**< Number of probed ethernet ports. */
163 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
164 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
166 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
169 * Test Forwarding Configuration.
170 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
171 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
173 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
174 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
175 portid_t nb_cfg_ports; /**< Number of configured ports. */
176 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
178 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
179 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
181 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
182 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
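/*
 * A minimal sketch of the invariants documented above (illustration only,
 * not part of the configuration logic):
 *
 *	RTE_ASSERT(nb_fwd_lcores <= nb_cfg_lcores && nb_cfg_lcores <= nb_lcores);
 *	RTE_ASSERT(nb_fwd_ports <= nb_cfg_ports && nb_cfg_ports <= nb_ports);
 */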
185 * Forwarding engines.
187 struct fwd_engine * fwd_engines[] = {
197 &five_tuple_swap_fwd_engine,
198 #ifdef RTE_LIBRTE_IEEE1588
199 &ieee1588_fwd_engine,
205 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
206 uint16_t mempool_flags;
208 struct fwd_config cur_fwd_config;
209 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
210 uint32_t retry_enabled;
211 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
212 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
214 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
215 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
216 DEFAULT_MBUF_DATA_SIZE
217 }; /**< Mbuf data space size. */
218 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
219 * specified on command-line. */
220 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
222 /** Extended statistics to show. */
223 struct rte_eth_xstat_name *xstats_display;
225 unsigned int xstats_display_num; /**< Number of extended statistics to show */
228 * In a container, the process running with the 'stats-period' option cannot be
229 * terminated interactively. Set a flag to exit the stats-period loop once SIGINT/SIGTERM is received.
232 uint8_t cl_quit; /* Quit testpmd from cmdline. */
235 * Max Rx frame size, set by '--max-pkt-len' parameter.
237 uint32_t max_rx_pkt_len;
240 * Configuration of packet segments used to scatter received packets
241 * if one of the split features is configured.
243 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
244 uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
245 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
246 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
249 * Configuration of packet segments used by the "txonly" processing engine.
251 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
252 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
253 TXONLY_DEF_PACKET_LEN,
255 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
257 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
258 /**< Split policy for packets to TX. */
260 uint8_t txonly_multi_flow;
261 /**< Whether multiple flows are generated in TXONLY mode. */
263 uint32_t tx_pkt_times_inter;
264 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
266 uint32_t tx_pkt_times_intra;
267 /**< Timings for send scheduling in TXONLY mode, time between packets. */
269 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
270 uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
271 int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
272 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
274 /* Whether the current configuration is in DCB mode; 0 means not in DCB mode. */
275 uint8_t dcb_config = 0;
278 * Configurable number of RX/TX queues.
280 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
281 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
282 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
285 * Configurable number of RX/TX ring descriptors.
286 * Defaults are supplied by drivers via ethdev.
288 #define RTE_TEST_RX_DESC_DEFAULT 0
289 #define RTE_TEST_TX_DESC_DEFAULT 0
290 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
291 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
293 #define RTE_PMD_PARAM_UNSET -1
295 * Configurable values of RX and TX ring threshold registers.
298 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
299 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
300 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
302 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
303 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
304 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
307 * Configurable value of RX free threshold.
309 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
312 * Configurable value of RX drop enable.
314 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
317 * Configurable value of TX free threshold.
319 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
322 * Configurable value of TX RS bit threshold.
324 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
327 * Configurable number of packets buffered before sending.
329 uint16_t noisy_tx_sw_bufsz;
332 * Configurable value of packet buffer timeout.
334 uint16_t noisy_tx_sw_buf_flush_time;
337 * Configurable value for size of VNF internal memory area
338 * used for simulating noisy neighbour behaviour
340 uint64_t noisy_lkup_mem_sz;
343 * Configurable value of number of random writes done in
344 * VNF simulation memory area.
346 uint64_t noisy_lkup_num_writes;
349 * Configurable value of number of random reads done in
350 * VNF simulation memory area.
352 uint64_t noisy_lkup_num_reads;
355 * Configurable value of number of random reads/writes done in
356 * VNF simulation memory area.
358 uint64_t noisy_lkup_num_reads_writes;
361 * Receive Side Scaling (RSS) configuration.
363 uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
366 * Port topology configuration
368 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
371 * Avoid flushing all the Rx streams before starting forwarding.
373 uint8_t no_flush_rx = 0; /* flush by default */
376 * Flow API isolated mode.
378 uint8_t flow_isolate_all;
381 * Avoid checking the link status when starting/stopping a port.
383 uint8_t no_link_check = 0; /* check by default */
386 * Don't automatically start all ports in interactive mode.
388 uint8_t no_device_start = 0;
391 * Enable link status change notification
393 uint8_t lsc_interrupt = 1; /* enabled by default */
396 * Enable device removal notification.
398 uint8_t rmv_interrupt = 1; /* enabled by default */
400 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
402 /* After attach, port setup is called on event or by iterator */
403 bool setup_on_probe_event = true;
405 /* Clear ptypes on port initialization. */
406 uint8_t clear_ptypes = true;
408 /* Hairpin ports configuration mode. */
409 uint16_t hairpin_mode;
411 /* Pretty printing of ethdev events */
412 static const char * const eth_event_desc[] = {
413 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
414 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
415 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
416 [RTE_ETH_EVENT_INTR_RESET] = "reset",
417 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
418 [RTE_ETH_EVENT_IPSEC] = "IPsec",
419 [RTE_ETH_EVENT_MACSEC] = "MACsec",
420 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
421 [RTE_ETH_EVENT_NEW] = "device probed",
422 [RTE_ETH_EVENT_DESTROY] = "device released",
423 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
424 [RTE_ETH_EVENT_RX_AVAIL_THRESH] = "RxQ available descriptors threshold reached",
425 [RTE_ETH_EVENT_MAX] = NULL,
429 * Display or mask ether events.
430 * Defaults to all events except VF_MBOX.
432 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
433 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
434 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
435 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
436 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
437 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
438 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
439 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
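/*
 * Illustrative use of the mask (a sketch): an ethdev event of type "type"
 * is reported only when its bit is set, e.g.
 *
 *	if (event_print_mask & (UINT32_C(1) << type))
 *		printf("event: %s\n", eth_event_desc[type]);
 */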
441 * Decide whether all memory is locked for performance.
446 * NIC bypass mode configuration options.
449 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
450 /* The NIC bypass watchdog timeout. */
451 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
455 #ifdef RTE_LIB_LATENCYSTATS
458 * Set when latency stats are enabled on the command line
460 uint8_t latencystats_enabled;
463 * Lcore ID to service latency statistics.
465 lcoreid_t latencystats_lcore_id = -1;
470 * Ethernet device configuration.
472 struct rte_eth_rxmode rx_mode;
474 struct rte_eth_txmode tx_mode = {
475 .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
478 struct rte_eth_fdir_conf fdir_conf = {
479 .mode = RTE_FDIR_MODE_NONE,
480 .pballoc = RTE_ETH_FDIR_PBALLOC_64K,
481 .status = RTE_FDIR_REPORT_STATUS,
483 .vlan_tci_mask = 0xFFEF,
485 .src_ip = 0xFFFFFFFF,
486 .dst_ip = 0xFFFFFFFF,
489 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
490 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
492 .src_port_mask = 0xFFFF,
493 .dst_port_mask = 0xFFFF,
494 .mac_addr_byte_mask = 0xFF,
495 .tunnel_type_mask = 1,
496 .tunnel_id_mask = 0xFFFFFFFF,
501 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
504 * Display zero values by default for xstats
506 uint8_t xstats_hide_zero;
509 * Measurement of CPU cycles disabled by default
511 uint8_t record_core_cycles;
514 * Display of RX and TX bursts disabled by default
516 uint8_t record_burst_stats;
519 * Number of ports per shared Rx queue group; 0 means disabled.
523 unsigned int num_sockets = 0;
524 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
526 #ifdef RTE_LIB_BITRATESTATS
527 /* Bitrate statistics */
528 struct rte_stats_bitrates *bitrate_data;
529 lcoreid_t bitrate_lcore_id;
530 uint8_t bitrate_enabled;
534 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
535 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
539 * Hexadecimal bitmask of the Rx multi-queue modes that can be enabled.
541 enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
544 * Used to set forced link speed
546 uint32_t eth_link_speed;
549 * ID of the current process in multi-process, used to
550 * configure the queues to be polled.
555 * Number of processes in multi-process, used to
556 * configure the queues to be polled.
558 unsigned int num_procs = 1;
561 eth_rx_metadata_negotiate_mp(uint16_t port_id)
563 uint64_t rx_meta_features = 0;
566 if (!is_proc_primary())
569 rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
570 rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
571 rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
573 ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
575 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
576 TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
580 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
581 TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
585 if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
586 TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
589 } else if (ret != -ENOTSUP) {
590 rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
591 port_id, rte_strerror(-ret));
596 eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
597 const struct rte_eth_conf *dev_conf)
599 if (is_proc_primary())
600 return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
606 change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
610 portid_t slave_pids[RTE_MAX_ETHPORTS];
611 struct rte_port *port;
616 num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
618 if (num_slaves < 0) {
619 fprintf(stderr, "Failed to get slave list for port = %u\n",
624 for (i = 0; i < num_slaves; i++) {
625 slave_pid = slave_pids[i];
626 port = &ports[slave_pid];
628 is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
631 RTE_SET_USED(bond_pid);
632 RTE_SET_USED(is_stop);
638 eth_dev_start_mp(uint16_t port_id)
642 if (is_proc_primary()) {
643 ret = rte_eth_dev_start(port_id);
647 struct rte_port *port = &ports[port_id];
650 * Starting a bonded port also starts all slaves under the bonded
651 * device. So if this port is a bonded device, we need to modify
652 * the port status of these slaves.
654 if (port->bond_flag == 1)
655 return change_bonding_slave_port_status(port_id, false);
662 eth_dev_stop_mp(uint16_t port_id)
666 if (is_proc_primary()) {
667 ret = rte_eth_dev_stop(port_id);
671 struct rte_port *port = &ports[port_id];
674 * Stopping a bonded port also stops all slaves under the bonded
675 * device. So if this port is a bonded device, we need to modify
676 * the port status of these slaves.
678 if (port->bond_flag == 1)
679 return change_bonding_slave_port_status(port_id, true);
686 mempool_free_mp(struct rte_mempool *mp)
688 if (is_proc_primary())
689 rte_mempool_free(mp);
693 eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
695 if (is_proc_primary())
696 return rte_eth_dev_set_mtu(port_id, mtu);
701 /* Forward function declarations */
702 static void setup_attached_port(portid_t pi);
703 static void check_all_ports_link_status(uint32_t port_mask);
704 static int eth_event_callback(portid_t port_id,
705 enum rte_eth_event_type type,
706 void *param, void *ret_param);
707 static void dev_event_callback(const char *device_name,
708 enum rte_dev_event_type type,
710 static void fill_xstats_display_info(void);
713 * Check if all the ports are started.
714 * If yes, return positive value. If not, return zero.
716 static int all_ports_started(void);
719 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
720 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
723 /* Holds the registered mbuf dynamic flags names. */
724 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
728 * Helper function to check whether a socket id has not been discovered yet.
729 * Returns a positive value if the socket is new, zero if it is already known.
732 new_socket_id(unsigned int socket_id)
736 for (i = 0; i < num_sockets; i++) {
737 if (socket_ids[i] == socket_id)
744 * Setup default configuration.
747 set_default_fwd_lcores_config(void)
751 unsigned int sock_num;
754 for (i = 0; i < RTE_MAX_LCORE; i++) {
755 if (!rte_lcore_is_enabled(i))
757 sock_num = rte_lcore_to_socket_id(i);
758 if (new_socket_id(sock_num)) {
759 if (num_sockets >= RTE_MAX_NUMA_NODES) {
760 rte_exit(EXIT_FAILURE,
761 "Total sockets greater than %u\n",
764 socket_ids[num_sockets++] = sock_num;
766 if (i == rte_get_main_lcore())
768 fwd_lcores_cpuids[nb_lc++] = i;
770 nb_lcores = (lcoreid_t) nb_lc;
771 nb_cfg_lcores = nb_lcores;
776 set_def_peer_eth_addrs(void)
780 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
781 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
782 peer_eth_addrs[i].addr_bytes[5] = i;
787 set_default_fwd_ports_config(void)
792 RTE_ETH_FOREACH_DEV(pt_id) {
793 fwd_ports_ids[i++] = pt_id;
795 /* Update sockets info according to the attached device */
796 int socket_id = rte_eth_dev_socket_id(pt_id);
797 if (socket_id >= 0 && new_socket_id(socket_id)) {
798 if (num_sockets >= RTE_MAX_NUMA_NODES) {
799 rte_exit(EXIT_FAILURE,
800 "Total sockets greater than %u\n",
803 socket_ids[num_sockets++] = socket_id;
807 nb_cfg_ports = nb_ports;
808 nb_fwd_ports = nb_ports;
812 set_def_fwd_config(void)
814 set_default_fwd_lcores_config();
815 set_def_peer_eth_addrs();
816 set_default_fwd_ports_config();
819 #ifndef RTE_EXEC_ENV_WINDOWS
820 /* extremely pessimistic estimation of memory required to create a mempool */
822 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
824 unsigned int n_pages, mbuf_per_pg, leftover;
825 uint64_t total_mem, mbuf_mem, obj_sz;
827 /* there is no good way to predict how much space the mempool will
828 * occupy because it will allocate chunks on the fly, and some of those
829 * will come from default DPDK memory while some will come from our
830 * external memory, so just assume 128MB will be enough for everyone.
832 uint64_t hdr_mem = 128 << 20;
834 /* account for possible non-contiguousness */
835 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
837 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
841 mbuf_per_pg = pgsz / obj_sz;
842 leftover = (nb_mbufs % mbuf_per_pg) > 0;
843 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
845 mbuf_mem = n_pages * pgsz;
847 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
849 if (total_mem > SIZE_MAX) {
850 TESTPMD_LOG(ERR, "Memory size too big\n");
853 *out = (size_t)total_mem;
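	/*
	 * Worked example with assumed numbers: 1000 mbufs of obj_sz 2176
	 * bytes on 2M pages give mbuf_per_pg = 2097152 / 2176 = 963,
	 * n_pages = 2, mbuf_mem = 4M, and total_mem = 128M + 4M = 132M.
	 */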
859 pagesz_flags(uint64_t page_sz)
861 /* as per mmap() manpage, all page sizes are log2 of page size
862 * shifted by MAP_HUGE_SHIFT
864 int log2 = rte_log2_u64(page_sz);
866 return (log2 << HUGE_SHIFT);
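/*
 * Example (assuming HUGE_SHIFT == MAP_HUGE_SHIFT == 26): for a 2M page,
 * rte_log2_u64() returns 21, so the flags value is 21 << 26, which matches
 * the kernel's MAP_HUGE_2MB encoding.
 */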
870 alloc_mem(size_t memsz, size_t pgsz, bool huge)
875 /* allocate anonymous hugepages */
876 flags = MAP_ANONYMOUS | MAP_PRIVATE;
878 flags |= HUGE_FLAG | pagesz_flags(pgsz);
880 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
881 if (addr == MAP_FAILED)
887 struct extmem_param {
891 rte_iova_t *iova_table;
892 unsigned int iova_table_len;
896 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
899 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
900 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
901 unsigned int cur_page, n_pages, pgsz_idx;
902 size_t mem_sz, cur_pgsz;
903 rte_iova_t *iovas = NULL;
907 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
908 /* skip anything that is too big */
909 if (pgsizes[pgsz_idx] > SIZE_MAX)
912 cur_pgsz = pgsizes[pgsz_idx];
914 /* if we were told not to allocate hugepages, override */
916 cur_pgsz = sysconf(_SC_PAGESIZE);
918 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
920 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
924 /* allocate our memory */
925 addr = alloc_mem(mem_sz, cur_pgsz, huge);
927 /* if we couldn't allocate memory with a specified page size,
928 * that doesn't mean we can't do it with other page sizes, so try another one.
934 /* store IOVA addresses for every page in this memory area */
935 n_pages = mem_sz / cur_pgsz;
937 iovas = malloc(sizeof(*iovas) * n_pages);
940 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
943 /* lock memory if it's not huge pages */
947 /* populate IOVA addresses */
948 for (cur_page = 0; cur_page < n_pages; cur_page++) {
953 offset = cur_pgsz * cur_page;
954 cur = RTE_PTR_ADD(addr, offset);
956 /* touch the page before getting its IOVA */
957 *(volatile char *)cur = 0;
959 iova = rte_mem_virt2iova(cur);
961 iovas[cur_page] = iova;
966 /* if we couldn't allocate anything */
972 param->pgsz = cur_pgsz;
973 param->iova_table = iovas;
974 param->iova_table_len = n_pages;
980 munmap(addr, mem_sz);
986 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
988 struct extmem_param param;
991 memset(&param, 0, sizeof(param));
993 /* check if our heap exists */
994 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
996 /* create our heap */
997 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
999 TESTPMD_LOG(ERR, "Cannot create heap\n");
1004 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
1006 TESTPMD_LOG(ERR, "Cannot create memory area\n");
1010 /* we now have a valid memory area, so add it to heap */
1011 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
1012 param.addr, param.len, param.iova_table,
1013 param.iova_table_len, param.pgsz);
1015 /* when using VFIO, memory is automatically mapped for DMA by EAL */
1017 /* not needed any more */
1018 free(param.iova_table);
1021 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
1022 munmap(param.addr, param.len);
1028 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
1034 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
1035 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
1040 RTE_ETH_FOREACH_DEV(pid) {
1041 struct rte_eth_dev_info dev_info;
1043 ret = eth_dev_info_get_print_err(pid, &dev_info);
1046 "unable to get device info for port %d on addr 0x%p,"
1047 "mempool unmapping will not be performed\n",
1052 ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
1055 "unable to DMA unmap addr 0x%p "
1057 memhdr->addr, dev_info.device->name);
1060 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
1063 "unable to un-register addr 0x%p\n", memhdr->addr);
1068 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
1069 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
1072 size_t page_size = sysconf(_SC_PAGESIZE);
1075 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
1079 "unable to register addr 0x%p\n", memhdr->addr);
1082 RTE_ETH_FOREACH_DEV(pid) {
1083 struct rte_eth_dev_info dev_info;
1085 ret = eth_dev_info_get_print_err(pid, &dev_info);
1088 "unable to get device info for port %d on addr 0x%p,"
1089 "mempool mapping will not be performed\n",
1093 ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
1096 "unable to DMA map addr 0x%p "
1098 memhdr->addr, dev_info.device->name);
1105 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
1106 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
1108 struct rte_pktmbuf_extmem *xmem;
1109 unsigned int ext_num, zone_num, elt_num;
1112 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
1113 elt_num = EXTBUF_ZONE_SIZE / elt_size;
1114 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
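	/*
	 * Example with assumed numbers: mbuf_sz = 2176 (already cache-line
	 * aligned) gives elt_num = 2096896 / 2176 = 963 buffers per zone,
	 * so 4000 mbufs need zone_num = ceil(4000 / 963) = 5 memzones.
	 */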
1116 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
1118 TESTPMD_LOG(ERR, "Cannot allocate memory for "
1119 "external buffer descriptors\n");
1123 for (ext_num = 0; ext_num < zone_num; ext_num++) {
1124 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
1125 const struct rte_memzone *mz;
1126 char mz_name[RTE_MEMZONE_NAMESIZE];
1129 ret = snprintf(mz_name, sizeof(mz_name),
1130 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
1131 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
1132 errno = ENAMETOOLONG;
1136 mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
1138 RTE_MEMZONE_IOVA_CONTIG |
1140 RTE_MEMZONE_SIZE_HINT_ONLY);
1143 * The caller exits on external buffer creation
1144 * error, so there is no need to free memzones.
1150 xseg->buf_ptr = mz->addr;
1151 xseg->buf_iova = mz->iova;
1152 xseg->buf_len = EXTBUF_ZONE_SIZE;
1153 xseg->elt_size = elt_size;
1155 if (ext_num == 0 && xmem != NULL) {
1164 * Configuration initialisation done once at init time.
1166 static struct rte_mempool *
1167 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
1168 unsigned int socket_id, uint16_t size_idx)
1170 char pool_name[RTE_MEMPOOL_NAMESIZE];
1171 struct rte_mempool *rte_mp = NULL;
1172 #ifndef RTE_EXEC_ENV_WINDOWS
1175 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1177 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1178 if (!is_proc_primary()) {
1179 rte_mp = rte_mempool_lookup(pool_name);
1181 rte_exit(EXIT_FAILURE,
1182 "Get mbuf pool for socket %u failed: %s\n",
1183 socket_id, rte_strerror(rte_errno));
1188 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1189 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1191 switch (mp_alloc_type) {
1192 case MP_ALLOC_NATIVE:
1194 /* wrapper to rte_mempool_create() */
1195 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1196 rte_mbuf_best_mempool_ops());
1197 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1198 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1201 #ifndef RTE_EXEC_ENV_WINDOWS
1204 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1205 mb_size, (unsigned int) mb_mempool_cache,
1206 sizeof(struct rte_pktmbuf_pool_private),
1207 socket_id, mempool_flags);
1211 if (rte_mempool_populate_anon(rte_mp) == 0) {
1212 rte_mempool_free(rte_mp);
1216 rte_pktmbuf_pool_init(rte_mp, NULL);
1217 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1218 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1222 case MP_ALLOC_XMEM_HUGE:
1225 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1227 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1228 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1231 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1232 if (heap_socket < 0)
1233 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1235 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1236 rte_mbuf_best_mempool_ops());
1237 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1238 mb_mempool_cache, 0, mbuf_seg_size,
1245 struct rte_pktmbuf_extmem *ext_mem;
1246 unsigned int ext_num;
1248 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1249 socket_id, pool_name, &ext_mem);
1251 rte_exit(EXIT_FAILURE,
1252 "Can't create pinned data buffers\n");
1254 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1255 rte_mbuf_best_mempool_ops());
1256 rte_mp = rte_pktmbuf_pool_create_extbuf
1257 (pool_name, nb_mbuf, mb_mempool_cache,
1258 0, mbuf_seg_size, socket_id,
1265 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1269 #ifndef RTE_EXEC_ENV_WINDOWS
1272 if (rte_mp == NULL) {
1273 rte_exit(EXIT_FAILURE,
1274 "Creation of mbuf pool for socket %u failed: %s\n",
1275 socket_id, rte_strerror(rte_errno));
1276 } else if (verbose_level > 0) {
1277 rte_mempool_dump(stdout, rte_mp);
1283 * Check whether the given socket id is valid in NUMA mode.
1284 * Return 0 if valid, -1 otherwise.
1287 check_socket_id(const unsigned int socket_id)
1289 static int warning_once = 0;
1291 if (new_socket_id(socket_id)) {
1292 if (!warning_once && numa_support)
1294 "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
1302 * Get the allowed maximum number of RX queues.
1303 * *pid returns the port id that has the minimal value of
1304 * max_rx_queues among all ports.
1307 get_allowed_max_nb_rxq(portid_t *pid)
1309 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1310 bool max_rxq_valid = false;
1312 struct rte_eth_dev_info dev_info;
1314 RTE_ETH_FOREACH_DEV(pi) {
1315 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1318 max_rxq_valid = true;
1319 if (dev_info.max_rx_queues < allowed_max_rxq) {
1320 allowed_max_rxq = dev_info.max_rx_queues;
1324 return max_rxq_valid ? allowed_max_rxq : 0;
1328 * Check whether the input rxq is valid.
1329 * The input rxq is valid if it is not greater than the maximum number
1330 * of Rx queues supported by every port.
1331 * Return 0 if valid, -1 otherwise.
1334 check_nb_rxq(queueid_t rxq)
1336 queueid_t allowed_max_rxq;
1339 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1340 if (rxq > allowed_max_rxq) {
1342 "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
1343 rxq, allowed_max_rxq, pid);
1350 * Get the allowed maximum number of TX queues.
1351 * *pid returns the port id that has the minimal value of
1352 * max_tx_queues among all ports.
1355 get_allowed_max_nb_txq(portid_t *pid)
1357 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1358 bool max_txq_valid = false;
1360 struct rte_eth_dev_info dev_info;
1362 RTE_ETH_FOREACH_DEV(pi) {
1363 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1366 max_txq_valid = true;
1367 if (dev_info.max_tx_queues < allowed_max_txq) {
1368 allowed_max_txq = dev_info.max_tx_queues;
1372 return max_txq_valid ? allowed_max_txq : 0;
1376 * Check whether the input txq is valid.
1377 * The input txq is valid if it is not greater than the maximum number
1378 * of Tx queues supported by every port.
1379 * Return 0 if valid, -1 otherwise.
1382 check_nb_txq(queueid_t txq)
1384 queueid_t allowed_max_txq;
1387 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1388 if (txq > allowed_max_txq) {
1390 "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
1391 txq, allowed_max_txq, pid);
1398 * Get the allowed maximum number of RXDs of every Rx queue.
1399 * *pid returns the port id that has the minimal value of
1400 * max_rxd over all queues of all ports.
1403 get_allowed_max_nb_rxd(portid_t *pid)
1405 uint16_t allowed_max_rxd = UINT16_MAX;
1407 struct rte_eth_dev_info dev_info;
1409 RTE_ETH_FOREACH_DEV(pi) {
1410 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1413 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1414 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1418 return allowed_max_rxd;
1422 * Get the allowed minimal number of RXDs of every Rx queue.
1423 * *pid returns the port id that has the largest value of
1424 * min_rxd over all queues of all ports.
1427 get_allowed_min_nb_rxd(portid_t *pid)
1429 uint16_t allowed_min_rxd = 0;
1431 struct rte_eth_dev_info dev_info;
1433 RTE_ETH_FOREACH_DEV(pi) {
1434 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1437 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1438 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1443 return allowed_min_rxd;
1447 * Check whether the input rxd is valid.
1448 * The input rxd is valid if it is not greater than the maximum number
1449 * of RXDs supported by every Rx queue and not less than the minimal
1450 * number of RXDs required by every Rx queue.
1451 * Return 0 if valid, -1 otherwise.
1454 check_nb_rxd(queueid_t rxd)
1456 uint16_t allowed_max_rxd;
1457 uint16_t allowed_min_rxd;
1460 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1461 if (rxd > allowed_max_rxd) {
1463 "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
1464 rxd, allowed_max_rxd, pid);
1468 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1469 if (rxd < allowed_min_rxd) {
1471 "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
1472 rxd, allowed_min_rxd, pid);
1480 * Get the allowed maximum number of TXDs of every Tx queue.
1481 * *pid returns the port id that has the minimal value of
1482 * max_txd over all queues of all ports.
1485 get_allowed_max_nb_txd(portid_t *pid)
1487 uint16_t allowed_max_txd = UINT16_MAX;
1489 struct rte_eth_dev_info dev_info;
1491 RTE_ETH_FOREACH_DEV(pi) {
1492 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1495 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1496 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1500 return allowed_max_txd;
1504 * Get the allowed minimal number of TXDs of every Tx queue.
1505 * *pid returns the port id that has the largest value of
1506 * min_txd over all queues of all ports.
1509 get_allowed_min_nb_txd(portid_t *pid)
1511 uint16_t allowed_min_txd = 0;
1513 struct rte_eth_dev_info dev_info;
1515 RTE_ETH_FOREACH_DEV(pi) {
1516 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1519 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1520 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1525 return allowed_min_txd;
1529 * Check whether the input txd is valid.
1530 * The input txd is valid if it is not greater than the maximum number of TXDs
1531 * supported by every Tx queue and not less than the minimal number required.
1532 * Return 0 if valid, -1 otherwise.
1535 check_nb_txd(queueid_t txd)
1537 uint16_t allowed_max_txd;
1538 uint16_t allowed_min_txd;
1541 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1542 if (txd > allowed_max_txd) {
1544 "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
1545 txd, allowed_max_txd, pid);
1549 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1550 if (txd < allowed_min_txd) {
1552 "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
1553 txd, allowed_min_txd, pid);
1561 * Get the allowed maximum number of hairpin queues.
1562 * *pid returns the port id that has the minimal value of
1563 * max_hairpin_queues among all ports.
1566 get_allowed_max_nb_hairpinq(portid_t *pid)
1568 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1570 struct rte_eth_hairpin_cap cap;
1572 RTE_ETH_FOREACH_DEV(pi) {
1573 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1577 if (cap.max_nb_queues < allowed_max_hairpinq) {
1578 allowed_max_hairpinq = cap.max_nb_queues;
1582 return allowed_max_hairpinq;
1586 * Check whether the input hairpin queue count is valid.
1587 * The input is valid if it is not greater than the maximum number
1588 * of hairpin queues supported by every port.
1589 * Return 0 if valid, -1 otherwise.
1592 check_nb_hairpinq(queueid_t hairpinq)
1594 queueid_t allowed_max_hairpinq;
1597 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1598 if (hairpinq > allowed_max_hairpinq) {
1600 "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
1601 hairpinq, allowed_max_hairpinq, pid);
1608 get_eth_overhead(struct rte_eth_dev_info *dev_info)
1610 uint32_t eth_overhead;
1612 if (dev_info->max_mtu != UINT16_MAX &&
1613 dev_info->max_rx_pktlen > dev_info->max_mtu)
1614 eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
1616 eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1618 return eth_overhead;
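/*
 * Example: a port reporting max_rx_pktlen = 1518 and max_mtu = 1500 yields
 * an overhead of 18 bytes, i.e. RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN
 * (4), the same value used by the fallback branch.
 */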
1622 init_config_port_offloads(portid_t pid, uint32_t socket_id)
1624 struct rte_port *port = &ports[pid];
1628 eth_rx_metadata_negotiate_mp(pid);
1630 port->dev_conf.txmode = tx_mode;
1631 port->dev_conf.rxmode = rx_mode;
1633 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1635 rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1637 if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
1638 port->dev_conf.txmode.offloads &=
1639 ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1641 /* Apply Rx offloads configuration */
1642 for (i = 0; i < port->dev_info.max_rx_queues; i++)
1643 port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
1644 /* Apply Tx offloads configuration */
1645 for (i = 0; i < port->dev_info.max_tx_queues; i++)
1646 port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
1649 port->dev_conf.link_speeds = eth_link_speed;
1652 port->dev_conf.rxmode.mtu = max_rx_pkt_len -
1653 get_eth_overhead(&port->dev_info);
1655 /* set flag to initialize port/queue */
1656 port->need_reconfig = 1;
1657 port->need_reconfig_queues = 1;
1658 port->socket_id = socket_id;
1659 port->tx_metadata = 0;
1662 * Check for maximum number of segments per MTU.
1663 * Accordingly update the mbuf data size.
1665 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1666 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1667 uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
1670 if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
1671 uint16_t data_size = (mtu + eth_overhead) /
1672 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1673 uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
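			/*
			 * Worked example with assumed numbers: mtu = 1500,
			 * eth_overhead = 18 and nb_mtu_seg_max = 5 give
			 * data_size = 1518 / 5 = 303, hence buffer_size =
			 * 303 + RTE_PKTMBUF_HEADROOM (128 by default) = 431.
			 */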
1675 if (buffer_size > mbuf_data_size[0]) {
1676 mbuf_data_size[0] = buffer_size;
1677 TESTPMD_LOG(WARNING,
1678 "Configured mbuf size of the first segment %hu\n",
1689 struct rte_mempool *mbp;
1690 unsigned int nb_mbuf_per_pool;
1693 struct rte_gro_param gro_param;
1699 /* Configuration of logical cores. */
1700 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1701 sizeof(struct fwd_lcore *) * nb_lcores,
1702 RTE_CACHE_LINE_SIZE);
1703 if (fwd_lcores == NULL) {
1704 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1705 "failed\n", nb_lcores);
1707 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1708 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1709 sizeof(struct fwd_lcore),
1710 RTE_CACHE_LINE_SIZE);
1711 if (fwd_lcores[lc_id] == NULL) {
1712 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1715 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1718 RTE_ETH_FOREACH_DEV(pid) {
1722 socket_id = port_numa[pid];
1723 if (port_numa[pid] == NUMA_NO_CONFIG) {
1724 socket_id = rte_eth_dev_socket_id(pid);
1727 * if socket_id is invalid,
1728 * set to the first available socket.
1730 if (check_socket_id(socket_id) < 0)
1731 socket_id = socket_ids[0];
1734 socket_id = (socket_num == UMA_NO_CONFIG) ?
1737 /* Apply default TxRx configuration for all ports */
1738 init_config_port_offloads(pid, socket_id);
1741 * Create mbuf pools.
1742 * If NUMA support is disabled, create a single mbuf pool in
1743 * socket 0 memory by default.
1744 * Otherwise, create an mbuf pool in the memory of each of sockets 0 and 1.
1746 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
1747 * nb_txd can still be configured at run time.
1749 if (param_total_num_mbufs)
1750 nb_mbuf_per_pool = param_total_num_mbufs;
1752 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1753 (nb_lcores * mb_mempool_cache) +
1754 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1755 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
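	/*
	 * Rough illustration with assumed build defaults: 2048 Rx
	 * descriptors + 2048 Tx descriptors + a 512-packet burst + 4
	 * lcores with a 250-entry cache each = 5608 mbufs per port,
	 * multiplied by RTE_MAX_ETHPORTS for the whole pool.
	 */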
1761 for (i = 0; i < num_sockets; i++)
1762 for (j = 0; j < mbuf_data_size_n; j++)
1763 mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1764 mbuf_pool_create(mbuf_data_size[j],
1770 for (i = 0; i < mbuf_data_size_n; i++)
1771 mempools[i] = mbuf_pool_create
1774 socket_num == UMA_NO_CONFIG ?
1781 gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1782 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
1785 * Record which mbuf pool each logical core uses, if needed.
1787 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1788 mbp = mbuf_pool_find(
1789 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1792 mbp = mbuf_pool_find(0, 0);
1793 fwd_lcores[lc_id]->mbp = mbp;
1795 /* initialize GSO context */
1796 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1797 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1798 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1799 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1801 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1808 /* create a gro context for each lcore */
1809 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1810 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1811 gro_param.max_item_per_flow = MAX_PKT_BURST;
1812 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1813 gro_param.socket_id = rte_lcore_to_socket_id(
1814 fwd_lcores_cpuids[lc_id]);
1815 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1816 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1817 rte_exit(EXIT_FAILURE,
1818 "rte_gro_ctx_create() failed\n");
1826 reconfig(portid_t new_port_id, unsigned socket_id)
1828 /* Reconfiguration of Ethernet ports. */
1829 init_config_port_offloads(new_port_id, socket_id);
1834 init_fwd_streams(void)
1837 struct rte_port *port;
1838 streamid_t sm_id, nb_fwd_streams_new;
1841 /* set socket id according to numa or not */
1842 RTE_ETH_FOREACH_DEV(pid) {
1844 if (nb_rxq > port->dev_info.max_rx_queues) {
1846 "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
1847 nb_rxq, port->dev_info.max_rx_queues);
1850 if (nb_txq > port->dev_info.max_tx_queues) {
1852 "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
1853 nb_txq, port->dev_info.max_tx_queues);
1857 if (port_numa[pid] != NUMA_NO_CONFIG)
1858 port->socket_id = port_numa[pid];
1860 port->socket_id = rte_eth_dev_socket_id(pid);
1863 * if socket_id is invalid,
1864 * set to the first available socket.
1866 if (check_socket_id(port->socket_id) < 0)
1867 port->socket_id = socket_ids[0];
1871 if (socket_num == UMA_NO_CONFIG)
1872 port->socket_id = 0;
1874 port->socket_id = socket_num;
1878 q = RTE_MAX(nb_rxq, nb_txq);
1881 "Fail: Cannot allocate fwd streams as number of queues is 0\n");
1884 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1885 if (nb_fwd_streams_new == nb_fwd_streams)
1888 if (fwd_streams != NULL) {
1889 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1890 if (fwd_streams[sm_id] == NULL)
1892 rte_free(fwd_streams[sm_id]);
1893 fwd_streams[sm_id] = NULL;
1895 rte_free(fwd_streams);
1900 nb_fwd_streams = nb_fwd_streams_new;
1901 if (nb_fwd_streams) {
1902 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1903 sizeof(struct fwd_stream *) * nb_fwd_streams,
1904 RTE_CACHE_LINE_SIZE);
1905 if (fwd_streams == NULL)
1906 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1907 " (struct fwd_stream *)) failed\n",
1910 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1911 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1912 " struct fwd_stream", sizeof(struct fwd_stream),
1913 RTE_CACHE_LINE_SIZE);
1914 if (fwd_streams[sm_id] == NULL)
1915 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1916 "(struct fwd_stream) failed\n");
1924 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1926 uint64_t total_burst, sburst;
1928 uint64_t burst_stats[4];
1929 uint16_t pktnb_stats[4];
1931 int burst_percent[4], sburstp;
1935 * First compute the total number of packet bursts and the
1936 * two highest numbers of bursts of the same number of packets.
1938 memset(&burst_stats, 0x0, sizeof(burst_stats));
1939 memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1941 /* Show stats for 0 burst size always */
1942 total_burst = pbs->pkt_burst_spread[0];
1943 burst_stats[0] = pbs->pkt_burst_spread[0];
1946 /* Find the next 2 burst sizes with highest occurrences. */
1947 for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
1948 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1953 total_burst += nb_burst;
1955 if (nb_burst > burst_stats[1]) {
1956 burst_stats[2] = burst_stats[1];
1957 pktnb_stats[2] = pktnb_stats[1];
1958 burst_stats[1] = nb_burst;
1959 pktnb_stats[1] = nb_pkt;
1960 } else if (nb_burst > burst_stats[2]) {
1961 burst_stats[2] = nb_burst;
1962 pktnb_stats[2] = nb_pkt;
1965 if (total_burst == 0)
1968 printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1969 for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1971 printf("%d%% of other]\n", 100 - sburstp);
1975 sburst += burst_stats[i];
1976 if (sburst == total_burst) {
1977 printf("%d%% of %d pkts]\n",
1978 100 - sburstp, (int) pktnb_stats[i]);
1983 (double)burst_stats[i] / total_burst * 100;
1984 printf("%d%% of %d pkts + ",
1985 burst_percent[i], (int) pktnb_stats[i]);
1986 sburstp += burst_percent[i];
1991 fwd_stream_stats_display(streamid_t stream_id)
1993 struct fwd_stream *fs;
1994 static const char *fwd_top_stats_border = "-------";
1996 fs = fwd_streams[stream_id];
1997 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1998 (fs->fwd_dropped == 0))
2000 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
2001 "TX Port=%2d/Queue=%2d %s\n",
2002 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
2003 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
2004 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
2005 " TX-dropped: %-14"PRIu64,
2006 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
2008 /* if checksum mode */
2009 if (cur_fwd_eng == &csum_fwd_engine) {
2010 printf(" RX- bad IP checksum: %-14"PRIu64
2011 " Rx- bad L4 checksum: %-14"PRIu64
2012 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
2013 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
2014 fs->rx_bad_outer_l4_csum);
2015 printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
2016 fs->rx_bad_outer_ip_csum);
2021 if (record_burst_stats) {
2022 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
2023 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
2028 fwd_stats_display(void)
2030 static const char *fwd_stats_border = "----------------------";
2031 static const char *acc_stats_border = "+++++++++++++++";
2033 struct fwd_stream *rx_stream;
2034 struct fwd_stream *tx_stream;
2035 uint64_t tx_dropped;
2036 uint64_t rx_bad_ip_csum;
2037 uint64_t rx_bad_l4_csum;
2038 uint64_t rx_bad_outer_l4_csum;
2039 uint64_t rx_bad_outer_ip_csum;
2040 } ports_stats[RTE_MAX_ETHPORTS];
2041 uint64_t total_rx_dropped = 0;
2042 uint64_t total_tx_dropped = 0;
2043 uint64_t total_rx_nombuf = 0;
2044 struct rte_eth_stats stats;
2045 uint64_t fwd_cycles = 0;
2046 uint64_t total_recv = 0;
2047 uint64_t total_xmit = 0;
2048 struct rte_port *port;
2054 memset(ports_stats, 0, sizeof(ports_stats));
2056 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2057 struct fwd_stream *fs = fwd_streams[sm_id];
2059 if (cur_fwd_config.nb_fwd_streams >
2060 cur_fwd_config.nb_fwd_ports) {
2061 fwd_stream_stats_display(sm_id);
2063 ports_stats[fs->tx_port].tx_stream = fs;
2064 ports_stats[fs->rx_port].rx_stream = fs;
2067 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
2069 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
2070 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
2071 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
2072 fs->rx_bad_outer_l4_csum;
2073 ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
2074 fs->rx_bad_outer_ip_csum;
2076 if (record_core_cycles)
2077 fwd_cycles += fs->core_cycles;
2079 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2080 pt_id = fwd_ports_ids[i];
2081 port = &ports[pt_id];
2083 ret = rte_eth_stats_get(pt_id, &stats);
2086 "%s: Error: failed to get stats (port %u): %d",
2087 __func__, pt_id, ret);
2090 stats.ipackets -= port->stats.ipackets;
2091 stats.opackets -= port->stats.opackets;
2092 stats.ibytes -= port->stats.ibytes;
2093 stats.obytes -= port->stats.obytes;
2094 stats.imissed -= port->stats.imissed;
2095 stats.oerrors -= port->stats.oerrors;
2096 stats.rx_nombuf -= port->stats.rx_nombuf;
2098 total_recv += stats.ipackets;
2099 total_xmit += stats.opackets;
2100 total_rx_dropped += stats.imissed;
2101 total_tx_dropped += ports_stats[pt_id].tx_dropped;
2102 total_tx_dropped += stats.oerrors;
2103 total_rx_nombuf += stats.rx_nombuf;
2105 printf("\n %s Forward statistics for port %-2d %s\n",
2106 fwd_stats_border, pt_id, fwd_stats_border);
2108 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
2109 "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
2110 stats.ipackets + stats.imissed);
2112 if (cur_fwd_eng == &csum_fwd_engine) {
2113 printf(" Bad-ipcsum: %-14"PRIu64
2114 " Bad-l4csum: %-14"PRIu64
2115 "Bad-outer-l4csum: %-14"PRIu64"\n",
2116 ports_stats[pt_id].rx_bad_ip_csum,
2117 ports_stats[pt_id].rx_bad_l4_csum,
2118 ports_stats[pt_id].rx_bad_outer_l4_csum);
2119 printf(" Bad-outer-ipcsum: %-14"PRIu64"\n",
2120 ports_stats[pt_id].rx_bad_outer_ip_csum);
2122 if (stats.ierrors + stats.rx_nombuf > 0) {
2123 printf(" RX-error: %-"PRIu64"\n", stats.ierrors);
2124 printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
2127 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
2128 "TX-total: %-"PRIu64"\n",
2129 stats.opackets, ports_stats[pt_id].tx_dropped,
2130 stats.opackets + ports_stats[pt_id].tx_dropped);
2132 if (record_burst_stats) {
2133 if (ports_stats[pt_id].rx_stream)
2134 pkt_burst_stats_display("RX",
2135 &ports_stats[pt_id].rx_stream->rx_burst_stats);
2136 if (ports_stats[pt_id].tx_stream)
2137 pkt_burst_stats_display("TX",
2138 &ports_stats[pt_id].tx_stream->tx_burst_stats);
2141 printf(" %s--------------------------------%s\n",
2142 fwd_stats_border, fwd_stats_border);
2145 printf("\n %s Accumulated forward statistics for all ports"
2147 acc_stats_border, acc_stats_border);
2148 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
2150 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
2152 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
2153 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
2154 if (total_rx_nombuf > 0)
2155 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
2156 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
2158 acc_stats_border, acc_stats_border);
2159 if (record_core_cycles) {
2160 #define CYC_PER_MHZ 1E6
2161 if (total_recv > 0 || total_xmit > 0) {
2162 uint64_t total_pkts = 0;
2163 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
2164 strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
2165 total_pkts = total_xmit;
2167 total_pkts = total_recv;
2169 printf("\n CPU cycles/packet=%.2F (total cycles="
2170 "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
2172 (double) fwd_cycles / total_pkts,
2173 fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
2174 (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
2180 fwd_stats_reset(void)
2187 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2188 pt_id = fwd_ports_ids[i];
2189 ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2192 "%s: Error: failed to clear stats (port %u):%d",
2193 __func__, pt_id, ret);
2195 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2196 struct fwd_stream *fs = fwd_streams[sm_id];
2200 fs->fwd_dropped = 0;
2201 fs->rx_bad_ip_csum = 0;
2202 fs->rx_bad_l4_csum = 0;
2203 fs->rx_bad_outer_l4_csum = 0;
2204 fs->rx_bad_outer_ip_csum = 0;
2206 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
2207 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2208 fs->core_cycles = 0;
2213 flush_fwd_rx_queues(void)
2215 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2222 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2223 uint64_t timer_period;
2225 if (num_procs > 1) {
2226 printf("multi-process not support for flushing fwd Rx queues, skip the below lines and return.\n");
2230 /* convert to number of cycles */
2231 timer_period = rte_get_timer_hz(); /* 1 second timeout */
2233 for (j = 0; j < 2; j++) {
2234 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2235 for (rxq = 0; rxq < nb_rxq; rxq++) {
2236 port_id = fwd_ports_ids[rxp];
2238 /* Polling stopped queues is prohibited. */
2239 if (ports[port_id].rxq[rxq].state ==
2240 RTE_ETH_QUEUE_STATE_STOPPED)
2244 * testpmd can get stuck in the do-while loop below
2245 * if rte_eth_rx_burst() always returns a nonzero
2246 * number of packets. So a timer is added to exit
2247 * this loop after a 1 second timeout.
2249 prev_tsc = rte_rdtsc();
2251 nb_rx = rte_eth_rx_burst(port_id, rxq,
2252 pkts_burst, MAX_PKT_BURST);
2253 for (i = 0; i < nb_rx; i++)
2254 rte_pktmbuf_free(pkts_burst[i]);
2256 cur_tsc = rte_rdtsc();
2257 diff_tsc = cur_tsc - prev_tsc;
2258 timer_tsc += diff_tsc;
2259 } while ((nb_rx > 0) &&
2260 (timer_tsc < timer_period));
2264 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2269 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2271 struct fwd_stream **fsm;
2274 #ifdef RTE_LIB_BITRATESTATS
2275 uint64_t tics_per_1sec;
2276 uint64_t tics_datum;
2277 uint64_t tics_current;
2278 uint16_t i, cnt_ports;
2280 cnt_ports = nb_ports;
2281 tics_datum = rte_rdtsc();
2282 tics_per_1sec = rte_get_timer_hz();
2284 fsm = &fwd_streams[fc->stream_idx];
2285 nb_fs = fc->stream_nb;
2287 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2288 if (!fsm[sm_id]->disabled)
2289 (*pkt_fwd)(fsm[sm_id]);
2290 #ifdef RTE_LIB_BITRATESTATS
2291 if (bitrate_enabled != 0 &&
2292 bitrate_lcore_id == rte_lcore_id()) {
2293 tics_current = rte_rdtsc();
2294 if (tics_current - tics_datum >= tics_per_1sec) {
2295 /* Periodic bitrate calculation */
2296 for (i = 0; i < cnt_ports; i++)
2297 rte_stats_bitrate_calc(bitrate_data,
2299 tics_datum = tics_current;
2303 #ifdef RTE_LIB_LATENCYSTATS
2304 if (latencystats_enabled != 0 &&
2305 latencystats_lcore_id == rte_lcore_id())
2306 rte_latencystats_update();
2309 } while (! fc->stopped);
2313 start_pkt_forward_on_core(void *fwd_arg)
2315 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2316 cur_fwd_config.fwd_eng->packet_fwd);
2321 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2322 * Used to start communication flows in network loopback test configurations.
2325 run_one_txonly_burst_on_core(void *fwd_arg)
2327 struct fwd_lcore *fwd_lc;
2328 struct fwd_lcore tmp_lcore;
2330 fwd_lc = (struct fwd_lcore *) fwd_arg;
2331 tmp_lcore = *fwd_lc;
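	/*
	 * Pre-setting "stopped" below makes the do/while loop in
	 * run_pkt_fwd_on_lcore() run exactly one iteration: a single burst
	 * is transmitted and the core returns immediately.
	 */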
2332 tmp_lcore.stopped = 1;
2333 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2338 * Launch packet forwarding:
2339 * - Setup per-port forwarding context.
2340 * - Launch logical cores with their forwarding configuration.
2343 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2349 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2350 lc_id = fwd_lcores_cpuids[i];
2351 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2352 fwd_lcores[i]->stopped = 0;
2353 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2354 fwd_lcores[i], lc_id);
2357 "launch lcore %u failed - diag=%d\n",
2364 * Launch packet forwarding configuration.
2367 start_packet_forwarding(int with_tx_first)
2369 port_fwd_begin_t port_fwd_begin;
2370 port_fwd_end_t port_fwd_end;
2371 stream_init_t stream_init = cur_fwd_eng->stream_init;
2374 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2375 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
2377 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2378 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
2380 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2381 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2382 (!nb_rxq || !nb_txq))
2383 rte_exit(EXIT_FAILURE,
2384 "Either rxq or txq are 0, cannot use %s fwd mode\n",
2385 cur_fwd_eng->fwd_mode_name);
2387 if (all_ports_started() == 0) {
2388 fprintf(stderr, "Not all ports were started\n");
2391 if (test_done == 0) {
2392 fprintf(stderr, "Packet forwarding already started\n");
2398 pkt_fwd_config_display(&cur_fwd_config);
2399 if (!pkt_fwd_shared_rxq_check())
2402 if (stream_init != NULL)
2403 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
2404 stream_init(fwd_streams[i]);
2406 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2407 if (port_fwd_begin != NULL) {
2408 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2409 if (port_fwd_begin(fwd_ports_ids[i])) {
2411 "Packet forwarding is not ready\n");
2417 if (with_tx_first) {
2418 port_fwd_begin = tx_only_engine.port_fwd_begin;
2419 if (port_fwd_begin != NULL) {
2420 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2421 if (port_fwd_begin(fwd_ports_ids[i])) {
2423 "Packet forwarding is not ready\n");
2433 flush_fwd_rx_queues();
2435 rxtx_config_display();
2438 if (with_tx_first) {
2439 while (with_tx_first--) {
2440 launch_packet_forwarding(
2441 run_one_txonly_burst_on_core);
2442 rte_eal_mp_wait_lcore();
2444 port_fwd_end = tx_only_engine.port_fwd_end;
2445 if (port_fwd_end != NULL) {
2446 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2447 (*port_fwd_end)(fwd_ports_ids[i]);
2450 launch_packet_forwarding(start_pkt_forward_on_core);
2454 stop_packet_forwarding(void)
2456 port_fwd_end_t port_fwd_end;
2462 fprintf(stderr, "Packet forwarding not started\n");
2465 printf("Telling cores to stop...");
2466 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2467 fwd_lcores[lc_id]->stopped = 1;
2468 printf("\nWaiting for lcores to finish...\n");
2469 rte_eal_mp_wait_lcore();
2470 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2471 if (port_fwd_end != NULL) {
2472 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2473 pt_id = fwd_ports_ids[i];
2474 (*port_fwd_end)(pt_id);
2478 fwd_stats_display();
2480 printf("\nDone.\n");
2485 dev_set_link_up(portid_t pid)
2487 if (rte_eth_dev_set_link_up(pid) < 0)
2488 fprintf(stderr, "\nSet link up fail.\n");
2492 dev_set_link_down(portid_t pid)
2494 if (rte_eth_dev_set_link_down(pid) < 0)
2495 fprintf(stderr, "\nSet link down fail.\n");
2499 all_ports_started(void)
2502 struct rte_port *port;
2504 RTE_ETH_FOREACH_DEV(pi) {
2506 /* Check whether any port is not started */
2507 if ((port->port_status != RTE_PORT_STARTED) &&
2508 (port->slave_flag == 0))
2512 /* All ports are started */
2517 port_is_stopped(portid_t port_id)
2519 struct rte_port *port = &ports[port_id];
2521 if ((port->port_status != RTE_PORT_STOPPED) &&
2522 (port->slave_flag == 0))
2528 all_ports_stopped(void)
2532 RTE_ETH_FOREACH_DEV(pi) {
2533 if (!port_is_stopped(pi))
2541 port_is_started(portid_t port_id)
2543 if (port_id_is_invalid(port_id, ENABLED_WARN))
2546 if (ports[port_id].port_status != RTE_PORT_STARTED)
2552 /* Configure the Rx and Tx hairpin queues for the selected port. */
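/* hairpin_mode bit semantics, as interpreted below: 0x0 - each port peers
 * with itself; 0x1 - ports loop (Tx peers with the next port); 0x2 - ports
 * are paired with each other; 0x10 - explicit Tx flow rule mode. */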
2554 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2557 struct rte_eth_hairpin_conf hairpin_conf = {
2562 struct rte_port *port = &ports[pi];
2563 uint16_t peer_rx_port = pi;
2564 uint16_t peer_tx_port = pi;
2565 uint32_t manual = 1;
2566 uint32_t tx_exp = hairpin_mode & 0x10;
2568 if (!(hairpin_mode & 0xf)) {
2572 } else if (hairpin_mode & 0x1) {
2573 peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2574 RTE_ETH_DEV_NO_OWNER);
2575 if (peer_tx_port >= RTE_MAX_ETHPORTS)
2576 peer_tx_port = rte_eth_find_next_owned_by(0,
2577 RTE_ETH_DEV_NO_OWNER);
2578 if (p_pi != RTE_MAX_ETHPORTS) {
2579 peer_rx_port = p_pi;
2583 /* Last port will be the peer RX port of the first. */
2584 RTE_ETH_FOREACH_DEV(next_pi)
2585 peer_rx_port = next_pi;
2588 } else if (hairpin_mode & 0x2) {
2590 peer_rx_port = p_pi;
2592 peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2593 RTE_ETH_DEV_NO_OWNER);
2594 if (peer_rx_port >= RTE_MAX_ETHPORTS)
2597 peer_tx_port = peer_rx_port;
2601 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2602 hairpin_conf.peers[0].port = peer_rx_port;
2603 hairpin_conf.peers[0].queue = i + nb_rxq;
2604 hairpin_conf.manual_bind = !!manual;
2605 hairpin_conf.tx_explicit = !!tx_exp;
2606 diag = rte_eth_tx_hairpin_queue_setup
2607 (pi, qi, nb_txd, &hairpin_conf);
2612 /* Failed to set up Tx hairpin queue; revert status and return */
2613 if (port->port_status == RTE_PORT_HANDLING)
2614 port->port_status = RTE_PORT_STOPPED;
2617 "Port %d can not be set back to stopped\n", pi);
2618 fprintf(stderr, "Fail to configure port %d hairpin queues\n",
2620 /* try to reconfigure queues next time */
2621 port->need_reconfig_queues = 1;
2624 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2625 hairpin_conf.peers[0].port = peer_tx_port;
2626 hairpin_conf.peers[0].queue = i + nb_txq;
2627 hairpin_conf.manual_bind = !!manual;
2628 hairpin_conf.tx_explicit = !!tx_exp;
2629 diag = rte_eth_rx_hairpin_queue_setup
2630 (pi, qi, nb_rxd, &hairpin_conf);
2635 /* Failed to set up Rx hairpin queue; revert status and return */
2636 if (port->port_status == RTE_PORT_HANDLING)
2637 port->port_status = RTE_PORT_STOPPED;
2640 "Port %d can not be set back to stopped\n", pi);
2641 fprintf(stderr, "Fail to configure port %d hairpin queues\n",
2643 /* try to reconfigure queues next time */
2644 port->need_reconfig_queues = 1;
2650 /* Configure an Rx queue, optionally splitting received packets across multiple mempools. */
2652 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2653 uint16_t nb_rx_desc, unsigned int socket_id,
2654 struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2656 union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2657 unsigned int i, mp_n;
2660 if (rx_pkt_nb_segs <= 1 ||
2661 (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2662 rx_conf->rx_seg = NULL;
2663 rx_conf->rx_nseg = 0;
2664 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2665 nb_rx_desc, socket_id,
2669 for (i = 0; i < rx_pkt_nb_segs; i++) {
2670 struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2671 struct rte_mempool *mpx;
2673 * Use the last valid pool for segments whose index
2674 * exceeds the number of configured mempools.
2676 mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2677 mpx = mbuf_pool_find(socket_id, mp_n);
2678 /* A zero segment length means: use the mbuf data buffer size. */
2679 rx_seg->length = rx_pkt_seg_lengths[i] ?
2680 rx_pkt_seg_lengths[i] :
2681 mbuf_data_size[mp_n];
2682 rx_seg->offset = i < rx_pkt_nb_offs ?
2683 rx_pkt_seg_offsets[i] : 0;
2684 rx_seg->mp = mpx ? mpx : mp;
2686 rx_conf->rx_nseg = rx_pkt_nb_segs;
2687 rx_conf->rx_seg = rx_useg;
2688 ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2689 socket_id, rx_conf, NULL);
2690 rx_conf->rx_seg = NULL;
2691 rx_conf->rx_nseg = 0;
2693 ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
2694 RTE_ETH_QUEUE_STATE_STOPPED :
2695 RTE_ETH_QUEUE_STATE_STARTED;
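/* Illustrative example (values are hypothetical): with rx_pkt_nb_segs = 2,
 * rx_pkt_seg_lengths = {128, 0}, rx_pkt_seg_offsets = {0, 64} and two
 * mempools configured, segment 0 receives 128 bytes from pool 0 at offset 0,
 * while segment 1 falls back to the full mbuf data size of pool 1 at offset
 * 64, so each packet is split across two mbufs drawn from different pools. */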
2700 alloc_xstats_display_info(portid_t pi)
2702 uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
2703 uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
2704 uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
2706 if (xstats_display_num == 0)
2709 *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
2710 if (*ids_supp == NULL)
2713 *prev_values = calloc(xstats_display_num,
2714 sizeof(**prev_values));
2715 if (*prev_values == NULL)
2716 goto fail_prev_values;
2718 *curr_values = calloc(xstats_display_num,
2719 sizeof(**curr_values));
2720 if (*curr_values == NULL)
2721 goto fail_curr_values;
2723 ports[pi].xstats_info.allocated = true;
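/* Free the per-port helper arrays used to display extended statistics. */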
2736 free_xstats_display_info(portid_t pi)
2738 if (!ports[pi].xstats_info.allocated)
2740 free(ports[pi].xstats_info.ids_supp);
2741 free(ports[pi].xstats_info.prev_values);
2742 free(ports[pi].xstats_info.curr_values);
2743 ports[pi].xstats_info.allocated = false;
2746 /** Fill helper structures for specified port to show extended statistics. */
2748 fill_xstats_display_info_for_port(portid_t pi)
2750 unsigned int stat, stat_supp;
2751 const char *xstat_name;
2752 struct rte_port *port;
2756 if (xstats_display_num == 0)
2759 if (pi == (portid_t)RTE_PORT_ALL) {
2760 fill_xstats_display_info();
2765 if (port->port_status != RTE_PORT_STARTED)
2768 if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
2769 rte_exit(EXIT_FAILURE,
2770 "Failed to allocate xstats display memory\n");
2772 ids_supp = port->xstats_info.ids_supp;
2773 for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
2774 xstat_name = xstats_display[stat].name;
2775 rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
2776 ids_supp + stat_supp);
2778 fprintf(stderr, "No xstat '%s' on port %u - skipping entry %u\n",
2779 xstat_name, pi, stat);
2785 port->xstats_info.ids_supp_sz = stat_supp;
2788 /** Fill helper structures for all ports to show extended statistics. */
2790 fill_xstats_display_info(void)
2794 if (xstats_display_num == 0)
2797 RTE_ETH_FOREACH_DEV(pi)
2798 fill_xstats_display_info_for_port(pi);
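/* Configure and start the given port, or all ports when pid == RTE_PORT_ALL. */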
2802 start_port(portid_t pid)
2804 int diag, need_check_link_status = -1;
2806 portid_t p_pi = RTE_MAX_ETHPORTS;
2807 portid_t pl[RTE_MAX_ETHPORTS];
2808 portid_t peer_pl[RTE_MAX_ETHPORTS];
2809 uint16_t cnt_pi = 0;
2810 uint16_t cfg_pi = 0;
2813 struct rte_port *port;
2814 struct rte_eth_hairpin_cap cap;
2816 if (port_id_is_invalid(pid, ENABLED_WARN))
2819 RTE_ETH_FOREACH_DEV(pi) {
2820 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2823 if (port_is_bonding_slave(pi)) {
2825 "Please remove port %d from bonded device.\n",
2830 need_check_link_status = 0;
2832 if (port->port_status == RTE_PORT_STOPPED)
2833 port->port_status = RTE_PORT_HANDLING;
2835 fprintf(stderr, "Port %d is not stopped, skipping it\n", pi);
2839 if (port->need_reconfig > 0) {
2840 struct rte_eth_conf dev_conf;
2843 port->need_reconfig = 0;
2845 if (flow_isolate_all) {
2846 int ret = port_flow_isolate(pi, 1);
2849 "Failed to apply isolated mode on port %d\n",
2854 configure_rxtx_dump_callbacks(0);
2855 printf("Configuring Port %d (socket %u)\n", pi,
2857 if (nb_hairpinq > 0 &&
2858 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2860 "Port %d doesn't support hairpin queues\n",
2865 /* configure port */
2866 diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
2867 nb_txq + nb_hairpinq,
2870 if (port->port_status == RTE_PORT_HANDLING)
2871 port->port_status = RTE_PORT_STOPPED;
2874 "Port %d can not be set back to stopped\n",
2876 fprintf(stderr, "Fail to configure port %d\n",
2878 /* try to reconfigure port next time */
2879 port->need_reconfig = 1;
2882 /* Get the device configuration */
2884 eth_dev_conf_get_print_err(pi, &dev_conf)) {
2886 "port %d can not get device configuration\n",
2890 /* Apply Rx offloads configuration */
2891 if (dev_conf.rxmode.offloads !=
2892 port->dev_conf.rxmode.offloads) {
2893 port->dev_conf.rxmode.offloads |=
2894 dev_conf.rxmode.offloads;
2896 k < port->dev_info.max_rx_queues;
2898 port->rxq[k].conf.offloads |=
2899 dev_conf.rxmode.offloads;
2901 /* Apply Tx offloads configuration */
2902 if (dev_conf.txmode.offloads !=
2903 port->dev_conf.txmode.offloads) {
2904 port->dev_conf.txmode.offloads |=
2905 dev_conf.txmode.offloads;
2907 k < port->dev_info.max_tx_queues;
2909 port->txq[k].conf.offloads |=
2910 dev_conf.txmode.offloads;
2913 if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2914 port->need_reconfig_queues = 0;
2915 /* setup tx queues */
2916 for (qi = 0; qi < nb_txq; qi++) {
2917 struct rte_eth_txconf *conf =
2918 &port->txq[qi].conf;
2920 if ((numa_support) &&
2921 (txring_numa[pi] != NUMA_NO_CONFIG))
2922 diag = rte_eth_tx_queue_setup(pi, qi,
2923 port->nb_tx_desc[qi],
2925 &(port->txq[qi].conf));
2927 diag = rte_eth_tx_queue_setup(pi, qi,
2928 port->nb_tx_desc[qi],
2930 &(port->txq[qi].conf));
2933 port->txq[qi].state =
2934 conf->tx_deferred_start ?
2935 RTE_ETH_QUEUE_STATE_STOPPED :
2936 RTE_ETH_QUEUE_STATE_STARTED;
2940 /* Failed to set up Tx queue; revert status and return */
2941 if (port->port_status == RTE_PORT_HANDLING)
2942 port->port_status = RTE_PORT_STOPPED;
2945 "Port %d can not be set back to stopped\n",
2948 "Fail to configure port %d tx queues\n",
2950 /* try to reconfigure queues next time */
2951 port->need_reconfig_queues = 1;
2954 /* Set up Rx queues */
2955 for (qi = 0; qi < nb_rxq; qi++) {
2956 if ((numa_support) &&
2957 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2958 struct rte_mempool *mp =
2960 (rxring_numa[pi], 0);
2963 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2968 diag = rx_queue_setup(pi, qi,
2969 port->nb_rx_desc[qi],
2971 &(port->rxq[qi].conf),
2974 struct rte_mempool *mp =
2976 (port->socket_id, 0);
2979 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2983 diag = rx_queue_setup(pi, qi,
2984 port->nb_rx_desc[qi],
2986 &(port->rxq[qi].conf),
2992 /* Failed to set up Rx queue; revert status and return */
2993 if (port->port_status == RTE_PORT_HANDLING)
2994 port->port_status = RTE_PORT_STOPPED;
2997 "Port %d can not be set back to stopped\n",
3000 "Fail to configure port %d rx queues\n",
3002 /* try to reconfigure queues next time */
3003 port->need_reconfig_queues = 1;
3006 /* setup hairpin queues */
3007 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
3010 configure_rxtx_dump_callbacks(verbose_level);
3012 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
3016 "Port %d: Failed to disable Ptype parsing\n",
3024 diag = eth_dev_start_mp(pi);
3026 fprintf(stderr, "Fail to start port %d: %s\n",
3027 pi, rte_strerror(-diag));
3029 /* Failed to start the port; revert status and return */
3030 if (port->port_status == RTE_PORT_HANDLING)
3031 port->port_status = RTE_PORT_STOPPED;
3034 "Port %d can not be set back to stopped\n",
3039 if (port->port_status == RTE_PORT_HANDLING)
3040 port->port_status = RTE_PORT_STARTED;
3042 fprintf(stderr, "Port %d cannot be set to started\n",
3045 if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
3046 printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
3047 RTE_ETHER_ADDR_BYTES(&port->eth_addr));
3049 /* At least one port was started; the link status must be checked */
3050 need_check_link_status = 1;
3055 if (need_check_link_status == 1 && !no_link_check)
3056 check_all_ports_link_status(RTE_PORT_ALL);
3057 else if (need_check_link_status == 0)
3058 fprintf(stderr, "Please stop the ports first\n");
3060 if (hairpin_mode & 0xf) {
3064 /* bind all started hairpin ports */
3065 for (i = 0; i < cfg_pi; i++) {
3067 /* bind current Tx to all peer Rx */
3068 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3069 RTE_MAX_ETHPORTS, 1);
3072 for (j = 0; j < peer_pi; j++) {
3073 if (!port_is_started(peer_pl[j]))
3075 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
3078 "Error during binding hairpin Tx port %u to %u: %s\n",
3080 rte_strerror(-diag));
3084 /* bind all peer Tx to current Rx */
3085 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3086 RTE_MAX_ETHPORTS, 0);
3089 for (j = 0; j < peer_pi; j++) {
3090 if (!port_is_started(peer_pl[j]))
3092 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
3095 "Error during binding hairpin Tx port %u to %u: %s\n",
3097 rte_strerror(-diag));
3104 fill_xstats_display_info_for_port(pid);
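/* Stop the given port, or all ports when pid == RTE_PORT_ALL, unbinding any hairpin peers first. */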
3111 stop_port(portid_t pid)
3114 struct rte_port *port;
3115 int need_check_link_status = 0;
3116 portid_t peer_pl[RTE_MAX_ETHPORTS];
3119 if (port_id_is_invalid(pid, ENABLED_WARN))
3122 printf("Stopping ports...\n");
3124 RTE_ETH_FOREACH_DEV(pi) {
3125 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3128 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3130 "Please remove port %d from forwarding configuration.\n",
3135 if (port_is_bonding_slave(pi)) {
3137 "Please remove port %d from bonded device.\n",
3143 if (port->port_status == RTE_PORT_STARTED)
3144 port->port_status = RTE_PORT_HANDLING;
3148 if (hairpin_mode & 0xf) {
3151 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
3152 /* unbind all peer Tx from current Rx */
3153 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3154 RTE_MAX_ETHPORTS, 0);
3157 for (j = 0; j < peer_pi; j++) {
3158 if (!port_is_started(peer_pl[j]))
3160 rte_eth_hairpin_unbind(peer_pl[j], pi);
3164 if (port->flow_list)
3165 port_flow_flush(pi);
3167 if (eth_dev_stop_mp(pi) != 0)
3168 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3171 if (port->port_status == RTE_PORT_HANDLING)
3172 port->port_status = RTE_PORT_STOPPED;
3174 fprintf(stderr, "Port %d cannot be set to stopped\n",
3176 need_check_link_status = 1;
3178 if (need_check_link_status && !no_link_check)
3179 check_all_ports_link_status(RTE_PORT_ALL);
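/* Compact the array in place, dropping invalid port ids and updating *total. */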
3185 remove_invalid_ports_in(portid_t *array, portid_t *total)
3188 portid_t new_total = 0;
3190 for (i = 0; i < *total; i++)
3191 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
3192 array[new_total] = array[i];
3199 remove_invalid_ports(void)
3201 remove_invalid_ports_in(ports_ids, &nb_ports);
3202 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
3203 nb_cfg_ports = nb_fwd_ports;
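/* Stop every bonding slave port and clear its slave flag; close the slaves as well when testpmd is quitting. */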
3207 clear_bonding_slave_device(portid_t *slave_pids, uint16_t num_slaves)
3209 struct rte_port *port;
3213 for (i = 0; i < num_slaves; i++) {
3214 slave_pid = slave_pids[i];
3215 if (port_is_started(slave_pid) == 1) {
3216 if (rte_eth_dev_stop(slave_pid) != 0)
3217 fprintf(stderr, "rte_eth_dev_stop failed for port %u\n",
3220 port = &ports[slave_pid];
3221 port->port_status = RTE_PORT_STOPPED;
3224 clear_port_slave_flag(slave_pid);
3226 /* Close the slave device when testpmd quits or is killed. */
3227 if (cl_quit == 1 || f_quit == 1)
3228 rte_eth_dev_close(slave_pid);
3233 close_port(portid_t pid)
3236 struct rte_port *port;
3237 portid_t slave_pids[RTE_MAX_ETHPORTS];
3240 if (port_id_is_invalid(pid, ENABLED_WARN))
3243 printf("Closing ports...\n");
3245 RTE_ETH_FOREACH_DEV(pi) {
3246 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3249 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3251 "Please remove port %d from forwarding configuration.\n",
3256 if (port_is_bonding_slave(pi)) {
3258 "Please remove port %d from bonded device.\n",
3264 if (port->port_status == RTE_PORT_CLOSED) {
3265 fprintf(stderr, "Port %d is already closed\n", pi);
3269 if (is_proc_primary()) {
3270 mcast_addr_pool_destroy(pi);
3271 port_flow_flush(pi);
3272 port_flex_item_flush(pi);
3273 port_action_handle_flush(pi);
3275 if (port->bond_flag == 1)
3276 num_slaves = rte_eth_bond_slaves_get(pi,
3277 slave_pids, RTE_MAX_ETHPORTS);
3279 rte_eth_dev_close(pi);
3281 * If this port is a bonding device, all slaves under
3282 * the device need to be removed or closed.
3284 if (port->bond_flag == 1 && num_slaves > 0)
3285 clear_bonding_slave_device(slave_pids,
3289 free_xstats_display_info(pi);
3292 remove_invalid_ports();
3297 reset_port(portid_t pid)
3301 struct rte_port *port;
3303 if (port_id_is_invalid(pid, ENABLED_WARN))
3306 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
3307 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
3309 "Can not reset port(s), please stop port(s) first.\n");
3313 printf("Resetting ports...\n");
3315 RTE_ETH_FOREACH_DEV(pi) {
3316 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3319 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3321 "Please remove port %d from forwarding configuration.\n",
3326 if (port_is_bonding_slave(pi)) {
3328 "Please remove port %d from bonded device.\n",
3333 diag = rte_eth_dev_reset(pi);
3336 port->need_reconfig = 1;
3337 port->need_reconfig_queues = 1;
3339 fprintf(stderr, "Failed to reset port %d. diag=%d\n",
3348 attach_port(char *identifier)
3351 struct rte_dev_iterator iterator;
3353 printf("Attaching a new port...\n");
3355 if (identifier == NULL) {
3356 fprintf(stderr, "Invalid parameters are specified\n");
3360 if (rte_dev_probe(identifier) < 0) {
3361 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3365 /* first attach mode: event */
3366 if (setup_on_probe_event) {
3367 /* new ports are detected on RTE_ETH_EVENT_NEW event */
3368 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
3369 if (ports[pi].port_status == RTE_PORT_HANDLING &&
3370 ports[pi].need_setup != 0)
3371 setup_attached_port(pi);
3375 /* second attach mode: iterator */
3376 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
3377 /* setup ports matching the devargs used for probing */
3378 if (port_is_forwarding(pi))
3379 continue; /* port was already attached before */
3380 setup_attached_port(pi);
3385 setup_attached_port(portid_t pi)
3387 unsigned int socket_id;
3390 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
3391 /* if socket_id is invalid, set to the first available socket. */
3392 if (check_socket_id(socket_id) < 0)
3393 socket_id = socket_ids[0];
3394 reconfig(pi, socket_id);
3395 ret = rte_eth_promiscuous_enable(pi);
3398 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
3399 pi, rte_strerror(-ret));
3401 ports_ids[nb_ports++] = pi;
3402 fwd_ports_ids[nb_fwd_ports++] = pi;
3403 nb_cfg_ports = nb_fwd_ports;
3404 ports[pi].need_setup = 0;
3405 ports[pi].port_status = RTE_PORT_STOPPED;
3407 printf("Port %d is attached. Total ports is now %d\n", pi, nb_ports);
3412 detach_device(struct rte_device *dev)
3417 fprintf(stderr, "Device already removed\n");
3421 printf("Removing a device...\n");
3423 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3424 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3425 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3426 fprintf(stderr, "Port %u not stopped\n",
3430 port_flow_flush(sibling);
3434 if (rte_dev_remove(dev) < 0) {
3435 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3438 remove_invalid_ports();
3440 printf("Device is detached\n");
3441 printf("Total ports is now %d\n", nb_ports);
3447 detach_port_device(portid_t port_id)
3450 struct rte_eth_dev_info dev_info;
3452 if (port_id_is_invalid(port_id, ENABLED_WARN))
3455 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3456 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3457 fprintf(stderr, "Port not stopped\n");
3460 fprintf(stderr, "Port was not closed\n");
3463 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3466 "Failed to get device info for port %d, not detaching\n",
3470 detach_device(dev_info.device);
3474 detach_devargs(char *identifier)
3476 struct rte_dev_iterator iterator;
3477 struct rte_devargs da;
3480 printf("Removing a device...\n");
3482 memset(&da, 0, sizeof(da));
3483 if (rte_devargs_parsef(&da, "%s", identifier)) {
3484 fprintf(stderr, "cannot parse identifier\n");
3488 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
3489 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3490 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3491 fprintf(stderr, "Port %u not stopped\n",
3493 rte_eth_iterator_cleanup(&iterator);
3494 rte_devargs_reset(&da);
3497 port_flow_flush(port_id);
3501 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3502 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3503 da.name, da.bus->name);
3504 rte_devargs_reset(&da);
3508 remove_invalid_ports();
3510 printf("Device %s is detached\n", identifier);
3511 printf("Total ports is now %d\n", nb_ports);
3513 rte_devargs_reset(&da);
3524 stop_packet_forwarding();
3526 #ifndef RTE_EXEC_ENV_WINDOWS
3527 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3529 if (mp_alloc_type == MP_ALLOC_ANON)
3530 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3535 if (ports != NULL) {
3537 RTE_ETH_FOREACH_DEV(pt_id) {
3538 printf("\nStopping port %d...\n", pt_id);
3542 RTE_ETH_FOREACH_DEV(pt_id) {
3543 printf("\nShutting down port %d...\n", pt_id);
3550 ret = rte_dev_event_monitor_stop();
3553 "fail to stop device event monitor.");
3557 ret = rte_dev_event_callback_unregister(NULL,
3558 dev_event_callback, NULL);
3561 "fail to unregister device event callback.\n");
3565 ret = rte_dev_hotplug_handle_disable();
3568 "fail to disable hotplug handling.\n");
3572 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3574 mempool_free_mp(mempools[i]);
3576 free(xstats_display);
3578 printf("\nBye...\n");
3581 typedef void (*cmd_func_t)(void);
3582 struct pmd_test_command {
3583 const char *cmd_name;
3584 cmd_func_t cmd_func;
3587 /* Check the link status of all ports for up to 9 s, then print the final status */
3589 check_all_ports_link_status(uint32_t port_mask)
3591 #define CHECK_INTERVAL 100 /* 100ms */
3592 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3594 uint8_t count, all_ports_up, print_flag = 0;
3595 struct rte_eth_link link;
3597 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3599 printf("Checking link statuses...\n");
3601 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3603 RTE_ETH_FOREACH_DEV(portid) {
3604 if ((port_mask & (1 << portid)) == 0)
3606 memset(&link, 0, sizeof(link));
3607 ret = rte_eth_link_get_nowait(portid, &link);
3610 if (print_flag == 1)
3612 "Port %u link get failed: %s\n",
3613 portid, rte_strerror(-ret));
3616 /* print link status if flag set */
3617 if (print_flag == 1) {
3618 rte_eth_link_to_str(link_status,
3619 sizeof(link_status), &link);
3620 printf("Port %d %s\n", portid, link_status);
3623 /* clear all_ports_up flag if any link down */
3624 if (link.link_status == RTE_ETH_LINK_DOWN) {
3630 /* after printing the final link status, get out */
3630 if (print_flag == 1)
3633 if (all_ports_up == 0) {
3635 rte_delay_ms(CHECK_INTERVAL);
3639 /* set print_flag if all ports are up or the timeout expired */
3639 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3649 rmv_port_callback(void *arg)
3651 int need_to_start = 0;
3652 int org_no_link_check = no_link_check;
3653 portid_t port_id = (intptr_t)arg;
3654 struct rte_eth_dev_info dev_info;
3657 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3659 if (!test_done && port_is_forwarding(port_id)) {
3661 stop_packet_forwarding();
3665 no_link_check = org_no_link_check;
3667 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3670 "Failed to get device info for port %d, not detaching\n",
3673 struct rte_device *device = dev_info.device;
3674 close_port(port_id);
3675 detach_device(device); /* might be already removed or have more ports */
3678 start_packet_forwarding(0);
3681 /* This function is used by the interrupt thread */
3683 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3686 RTE_SET_USED(param);
3687 RTE_SET_USED(ret_param);
3689 if (type >= RTE_ETH_EVENT_MAX) {
3691 "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3692 port_id, __func__, type);
3694 } else if (event_print_mask & (UINT32_C(1) << type)) {
3695 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3696 eth_event_desc[type]);
3701 case RTE_ETH_EVENT_NEW:
3702 ports[port_id].need_setup = 1;
3703 ports[port_id].port_status = RTE_PORT_HANDLING;
3705 case RTE_ETH_EVENT_INTR_RMV:
3706 if (port_id_is_invalid(port_id, DISABLED_WARN))
3708 if (rte_eal_alarm_set(100000,
3709 rmv_port_callback, (void *)(intptr_t)port_id))
3711 "Could not set up deferred device removal\n");
3713 case RTE_ETH_EVENT_DESTROY:
3714 ports[port_id].port_status = RTE_PORT_CLOSED;
3715 printf("Port %u is closed\n", port_id);
3717 case RTE_ETH_EVENT_RX_AVAIL_THRESH: {
3721 /* avail_thresh query API rewinds rxq_id, no need to check max RxQ num */
3722 for (rxq_id = 0; ; rxq_id++) {
3723 ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id,
3727 printf("Received avail_thresh event, port: %u, rxq_id: %u\n",
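/* Register eth_event_callback() for every ethdev event type on all ports. */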
3739 register_eth_event_callback(void)
3742 enum rte_eth_event_type event;
3744 for (event = RTE_ETH_EVENT_UNKNOWN;
3745 event < RTE_ETH_EVENT_MAX; event++) {
3746 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3751 TESTPMD_LOG(ERR, "Failed to register callback for "
3752 "%s event\n", eth_event_desc[event]);
3760 /* This function is used by the interrupt thread */
3762 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3763 __rte_unused void *arg)
3768 if (type >= RTE_DEV_EVENT_MAX) {
3769 fprintf(stderr, "%s called upon invalid event %d\n",
3775 case RTE_DEV_EVENT_REMOVE:
3776 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3778 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3780 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
3785 * Because the user's callback is invoked from the EAL interrupt
3786 * callback, the interrupt callback must finish before it can be
3787 * unregistered when detaching a device. So the callback must
3788 * return quickly and detach the device through a deferred removal.
3789 * This is a workaround; once device detaching is moved into the
3790 * EAL in the future, the deferred removal could be dropped.
3793 if (rte_eal_alarm_set(100000,
3794 rmv_port_callback, (void *)(intptr_t)port_id))
3796 "Could not set up deferred device removal\n");
3798 case RTE_DEV_EVENT_ADD:
3799 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3801 /* TODO: after the kernel driver binding finishes,
3802 * begin to attach the port.
3811 rxtx_port_config(portid_t pid)
3815 struct rte_port *port = &ports[pid];
3817 for (qid = 0; qid < nb_rxq; qid++) {
3818 offloads = port->rxq[qid].conf.offloads;
3819 port->rxq[qid].conf = port->dev_info.default_rxconf;
3821 if (rxq_share > 0 &&
3822 (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
3823 /* Non-zero share group to enable RxQ share. */
3824 port->rxq[qid].conf.share_group = pid / rxq_share + 1;
3825 port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
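/* For example (illustrative): with rxq_share == 2, ports 0-1 land in
 * share group 1, ports 2-3 in group 2, and so on; queue qid of every
 * port in a group maps to the same shared queue qid. */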
3829 port->rxq[qid].conf.offloads = offloads;
3831 /* Check if any Rx parameters have been passed */
3832 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3833 port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh;
3835 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3836 port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh;
3838 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3839 port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh;
3841 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3842 port->rxq[qid].conf.rx_free_thresh = rx_free_thresh;
3844 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3845 port->rxq[qid].conf.rx_drop_en = rx_drop_en;
3847 port->nb_rx_desc[qid] = nb_rxd;
3850 for (qid = 0; qid < nb_txq; qid++) {
3851 offloads = port->txq[qid].conf.offloads;
3852 port->txq[qid].conf = port->dev_info.default_txconf;
3854 port->txq[qid].conf.offloads = offloads;
3856 /* Check if any Tx parameters have been passed */
3857 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3858 port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh;
3860 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3861 port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh;
3863 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3864 port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh;
3866 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3867 port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh;
3869 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3870 port->txq[qid].conf.tx_free_thresh = tx_free_thresh;
3872 port->nb_tx_desc[qid] = nb_txd;
3877 * Helper function to set MTU from frame size
3879 * port->dev_info should be set before calling this function.
3881 * return 0 on success, negative on error
3884 update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
3886 struct rte_port *port = &ports[portid];
3887 uint32_t eth_overhead;
3888 uint16_t mtu, new_mtu;
3890 eth_overhead = get_eth_overhead(&port->dev_info);
3892 if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
3893 printf("Failed to get MTU for port %u\n", portid);
3897 new_mtu = max_rx_pktlen - eth_overhead;
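/* e.g. (illustrative): a 1518-byte max frame minus an 18-byte Ethernet
 * overhead (14-byte header + 4-byte CRC) yields the classic 1500-byte MTU. */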
3902 if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
3904 "Failed to set MTU to %u for port %u\n",
3909 port->dev_conf.rxmode.mtu = new_mtu;
3915 init_port_config(void)
3918 struct rte_port *port;
3921 RTE_ETH_FOREACH_DEV(pid) {
3923 port->dev_conf.fdir_conf = fdir_conf;
3925 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3930 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3931 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3932 rss_hf & port->dev_info.flow_type_rss_offloads;
3934 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3935 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3938 if (port->dcb_flag == 0) {
3939 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
3940 port->dev_conf.rxmode.mq_mode =
3941 (enum rte_eth_rx_mq_mode)
3942 (rx_mq_mode & RTE_ETH_MQ_RX_RSS);
3944 port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
3945 port->dev_conf.rxmode.offloads &=
3946 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3949 i < port->dev_info.nb_rx_queues;
3951 port->rxq[i].conf.offloads &=
3952 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3956 rxtx_port_config(pid);
3958 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3962 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3963 rte_pmd_ixgbe_bypass_init(pid);
3966 if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
3967 port->dev_conf.intr_conf.lsc = 1;
3968 if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3969 port->dev_conf.intr_conf.rmv = 1;
3973 void set_port_slave_flag(portid_t slave_pid)
3975 struct rte_port *port;
3977 port = &ports[slave_pid];
3978 port->slave_flag = 1;
3981 void clear_port_slave_flag(portid_t slave_pid)
3983 struct rte_port *port;
3985 port = &ports[slave_pid];
3986 port->slave_flag = 0;
3989 uint8_t port_is_bonding_slave(portid_t slave_pid)
3991 struct rte_port *port;
3992 struct rte_eth_dev_info dev_info;
3995 port = &ports[slave_pid];
3996 ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
3999 "Failed to get device info for port id %d,"
4000 "cannot determine if the port is a bonded slave",
4004 if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
4009 const uint16_t vlan_tags[] = {
4010 0, 1, 2, 3, 4, 5, 6, 7,
4011 8, 9, 10, 11, 12, 13, 14, 15,
4012 16, 17, 18, 19, 20, 21, 22, 23,
4013 24, 25, 26, 27, 28, 29, 30, 31
4017 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
4018 enum dcb_mode_enable dcb_mode,
4019 enum rte_eth_nb_tcs num_tcs,
4024 struct rte_eth_rss_conf rss_conf;
4027 * Builds up the correct configuration for dcb+vt based on the vlan tags array
4028 * given above, and the number of traffic classes available for use.
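 * For example (illustrative): with num_tcs == RTE_ETH_4_TCS there are
 * 32 VMDq pools; VLAN tag i is mapped to pool i % 32 and user
 * priority p to traffic class p % 4.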
4030 if (dcb_mode == DCB_VT_ENABLED) {
4031 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
4032 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
4033 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
4034 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
4036 /* VMDQ+DCB RX and TX configurations */
4037 vmdq_rx_conf->enable_default_pool = 0;
4038 vmdq_rx_conf->default_pool = 0;
4039 vmdq_rx_conf->nb_queue_pools =
4040 (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
4041 vmdq_tx_conf->nb_queue_pools =
4042 (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
4044 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
4045 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
4046 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
4047 vmdq_rx_conf->pool_map[i].pools =
4048 1 << (i % vmdq_rx_conf->nb_queue_pools);
4050 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4051 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
4052 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
4055 /* Set the multi-queue DCB mode for both Rx and Tx */
4056 eth_conf->rxmode.mq_mode =
4057 (enum rte_eth_rx_mq_mode)
4058 (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
4059 eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
4061 struct rte_eth_dcb_rx_conf *rx_conf =
4062 &eth_conf->rx_adv_conf.dcb_rx_conf;
4063 struct rte_eth_dcb_tx_conf *tx_conf =
4064 &eth_conf->tx_adv_conf.dcb_tx_conf;
4066 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
4068 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
4072 rx_conf->nb_tcs = num_tcs;
4073 tx_conf->nb_tcs = num_tcs;
4075 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4076 rx_conf->dcb_tc[i] = i % num_tcs;
4077 tx_conf->dcb_tc[i] = i % num_tcs;
4080 eth_conf->rxmode.mq_mode =
4081 (enum rte_eth_rx_mq_mode)
4082 (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
4083 eth_conf->rx_adv_conf.rss_conf = rss_conf;
4084 eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
4088 eth_conf->dcb_capability_en =
4089 RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
4091 eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
4097 init_port_dcb_config(portid_t pid,
4098 enum dcb_mode_enable dcb_mode,
4099 enum rte_eth_nb_tcs num_tcs,
4102 struct rte_eth_conf port_conf;
4103 struct rte_port *rte_port;
4107 if (num_procs > 1) {
4108 printf("The multi-process feature doesn't support dcb.\n");
4111 rte_port = &ports[pid];
4113 /* retain the original device configuration. */
4114 memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
4116 /* Set the DCB configuration, in both VT and non-VT modes */
4117 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
4120 port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4121 /* remove RSS HASH offload for DCB in vt mode */
4122 if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
4123 port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4124 for (i = 0; i < nb_rxq; i++)
4125 rte_port->rxq[i].conf.offloads &=
4126 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4129 /* Re-configure the device. */
4130 retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
4134 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
4138 /* If dev_info.vmdq_pool_base is greater than 0,
4139 * the queue IDs of the VMDq pools start after the PF queues.
4141 if (dcb_mode == DCB_VT_ENABLED &&
4142 rte_port->dev_info.vmdq_pool_base > 0) {
4144 "VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
4149 /* Assume the ports in testpmd have the same DCB capability
4150 * and the same number of Rx and Tx queues in DCB mode
4152 if (dcb_mode == DCB_VT_ENABLED) {
4153 if (rte_port->dev_info.max_vfs > 0) {
4154 nb_rxq = rte_port->dev_info.nb_rx_queues;
4155 nb_txq = rte_port->dev_info.nb_tx_queues;
4157 nb_rxq = rte_port->dev_info.max_rx_queues;
4158 nb_txq = rte_port->dev_info.max_tx_queues;
4161 /* If VT is disabled, use all PF queues */
4162 if (rte_port->dev_info.vmdq_pool_base == 0) {
4163 nb_rxq = rte_port->dev_info.max_rx_queues;
4164 nb_txq = rte_port->dev_info.max_tx_queues;
4166 nb_rxq = (queueid_t)num_tcs;
4167 nb_txq = (queueid_t)num_tcs;
4171 rx_free_thresh = 64;
4173 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4175 rxtx_port_config(pid);
4177 rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4178 for (i = 0; i < RTE_DIM(vlan_tags); i++)
4179 rx_vft_set(pid, vlan_tags[i], 1);
4181 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4185 rte_port->dcb_flag = 1;
4187 /* Enter DCB configuration status */
4198 /* Configuration of Ethernet ports. */
4199 ports = rte_zmalloc("testpmd: ports",
4200 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4201 RTE_CACHE_LINE_SIZE);
4202 if (ports == NULL) {
4203 rte_exit(EXIT_FAILURE,
4204 "rte_zmalloc(%d struct rte_port) failed\n",
4207 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4208 ports[i].xstats_info.allocated = false;
4209 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4210 LIST_INIT(&ports[i].flow_tunnel_list);
4211 /* Initialize ports NUMA structures */
4212 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4213 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4214 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4228 const char clr[] = { 27, '[', '2', 'J', '\0' };
4229 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4231 /* Clear the screen and move the cursor to the top-left corner (ANSI ESC[2J and ESC[1;1H; 27 is the ESC code) */
4232 printf("%s%s", clr, top_left);
4234 printf("\nPort statistics ====================================");
4235 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4236 nic_stats_display(fwd_ports_ids[i]);
4242 signal_handler(int signum)
4244 if (signum == SIGINT || signum == SIGTERM) {
4245 fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4247 #ifdef RTE_LIB_PDUMP
4248 /* uninitialize packet capture framework */
4251 #ifdef RTE_LIB_LATENCYSTATS
4252 if (latencystats_enabled != 0)
4253 rte_latencystats_uninit();
4256 /* Set flag to indicate forced termination. */
4258 /* exit with the expected status */
4259 #ifndef RTE_EXEC_ENV_WINDOWS
4260 signal(signum, SIG_DFL);
4261 kill(getpid(), signum);
4267 main(int argc, char **argv)
4274 signal(SIGINT, signal_handler);
4275 signal(SIGTERM, signal_handler);
4277 testpmd_logtype = rte_log_register("testpmd");
4278 if (testpmd_logtype < 0)
4279 rte_exit(EXIT_FAILURE, "Cannot register log type");
4280 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
4282 diag = rte_eal_init(argc, argv);
4284 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
4285 rte_strerror(rte_errno));
4287 ret = register_eth_event_callback();
4289 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
4291 #ifdef RTE_LIB_PDUMP
4292 /* initialize packet capture framework */
4297 RTE_ETH_FOREACH_DEV(port_id) {
4298 ports_ids[count] = port_id;
4301 nb_ports = (portid_t) count;
4303 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
4305 /* allocate port structures, and init them */
4308 set_def_fwd_config();
4310 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
4311 "Check the core mask argument\n");
4313 /* Bitrate/latency stats disabled by default */
4314 #ifdef RTE_LIB_BITRATESTATS
4315 bitrate_enabled = 0;
4317 #ifdef RTE_LIB_LATENCYSTATS
4318 latencystats_enabled = 0;
4321 /* on FreeBSD, mlockall() is disabled by default */
4322 #ifdef RTE_EXEC_ENV_FREEBSD
4331 launch_args_parse(argc, argv);
4333 #ifndef RTE_EXEC_ENV_WINDOWS
4334 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4335 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
4340 if (tx_first && interactive)
4341 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
4342 "interactive mode.\n");
4344 if (tx_first && lsc_interrupt) {
4346 "Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
4350 if (!nb_rxq && !nb_txq)
4352 "Warning: Either rx or tx queues should be non-zero\n");
4354 if (nb_rxq > 1 && nb_rxq > nb_txq)
4356 "Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
4362 ret = rte_dev_hotplug_handle_enable();
4365 "fail to enable hotplug handling.");
4369 ret = rte_dev_event_monitor_start();
4372 "fail to start device event monitoring.");
4376 ret = rte_dev_event_callback_register(NULL,
4377 dev_event_callback, NULL);
4380 "fail to register device event callback\n");
4385 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
4386 rte_exit(EXIT_FAILURE, "Start ports failed\n");
4388 /* set all ports to promiscuous mode by default */
4389 RTE_ETH_FOREACH_DEV(port_id) {
4390 ret = rte_eth_promiscuous_enable(port_id);
4393 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
4394 port_id, rte_strerror(-ret));
4397 #ifdef RTE_LIB_METRICS
4398 /* Init metrics library */
4399 rte_metrics_init(rte_socket_id());
4402 #ifdef RTE_LIB_LATENCYSTATS
4403 if (latencystats_enabled != 0) {
4404 int ret = rte_latencystats_init(1, NULL);
4407 "Warning: latencystats init() returned error %d\n",
4409 fprintf(stderr, "Latencystats running on lcore %d\n",
4410 latencystats_lcore_id);
4414 /* Setup bitrate stats */
4415 #ifdef RTE_LIB_BITRATESTATS
4416 if (bitrate_enabled != 0) {
4417 bitrate_data = rte_stats_bitrate_create();
4418 if (bitrate_data == NULL)
4419 rte_exit(EXIT_FAILURE,
4420 "Could not allocate bitrate data.\n");
4421 rte_stats_bitrate_reg(bitrate_data);
4424 #ifdef RTE_LIB_CMDLINE
4425 if (init_cmdline() != 0)
4426 rte_exit(EXIT_FAILURE,
4427 "Could not initialise cmdline context.\n");
4429 if (strlen(cmdline_filename) != 0)
4430 cmdline_read_from_file(cmdline_filename);
4432 if (interactive == 1) {
4434 printf("Start automatic packet forwarding\n");
4435 start_packet_forwarding(0);
4447 printf("No command-line core given, starting packet forwarding\n");
4448 start_packet_forwarding(tx_first);
4449 if (stats_period != 0) {
4450 uint64_t prev_time = 0, cur_time, diff_time = 0;
4451 uint64_t timer_period;
4453 /* Convert to number of cycles */
4454 timer_period = stats_period * rte_get_timer_hz();
4456 while (f_quit == 0) {
4457 cur_time = rte_get_timer_cycles();
4458 diff_time += cur_time - prev_time;
4460 if (diff_time >= timer_period) {
4462 /* Reset the timer */
4465 /* Sleep to avoid unnecessary checks */
4466 prev_time = cur_time;
4467 rte_delay_us_sleep(US_PER_S);
4471 printf("Press enter to exit\n");
4472 rc = read(0, &c, 1);
4478 ret = rte_eal_cleanup();
4480 rte_exit(EXIT_FAILURE,
4481 "EAL cleanup failed: %s\n", strerror(-ret));
4483 return EXIT_SUCCESS;