1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
12 #ifndef RTE_EXEC_ENV_WINDOWS
15 #include <sys/types.h>
19 #include <sys/queue.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_byteorder.h>
30 #include <rte_debug.h>
31 #include <rte_cycles.h>
32 #include <rte_memory.h>
33 #include <rte_memcpy.h>
34 #include <rte_launch.h>
36 #include <rte_alarm.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
51 #include <rte_pmd_ixgbe.h>
54 #include <rte_pdump.h>
57 #ifdef RTE_LIB_METRICS
58 #include <rte_metrics.h>
60 #ifdef RTE_LIB_BITRATESTATS
61 #include <rte_bitrate.h>
63 #ifdef RTE_LIB_LATENCYSTATS
64 #include <rte_latencystats.h>
66 #ifdef RTE_EXEC_ENV_WINDOWS
70 #include <rte_eth_bond.h>
76 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
77 #define HUGE_FLAG (0x40000)
79 #define HUGE_FLAG MAP_HUGETLB
82 #ifndef MAP_HUGE_SHIFT
83 /* older kernels (or FreeBSD) will not have this define */
84 #define HUGE_SHIFT (26)
86 #define HUGE_SHIFT MAP_HUGE_SHIFT
89 #define EXTMEM_HEAP_NAME "extmem"
91 * Zone size with the malloc overhead (max of debug and release variants)
92 * must fit into the smallest supported hugepage size (2M),
93 * so that an IOVA-contiguous zone of this size can always be allocated
94 * if there are free 2M hugepages.
96 #define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
98 uint16_t verbose_level = 0; /**< Silent by default. */
99 int testpmd_logtype; /**< Log type for testpmd logs */
101 /* use main core for command line ? */
102 uint8_t interactive = 0;
103 uint8_t auto_start = 0;
105 char cmdline_filename[PATH_MAX] = {0};
108 * NUMA support configuration.
109 * When set, the NUMA support attempts to dispatch the allocation of the
110 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
111 * probed ports among the CPU sockets 0 and 1.
112 * Otherwise, all memory is allocated from CPU socket 0.
114 uint8_t numa_support = 1; /**< numa enabled by default */
117 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
120 uint8_t socket_num = UMA_NO_CONFIG;
123 * Select mempool allocation type:
124 * - native: use regular DPDK memory
125 * - anon: use regular DPDK memory to create mempool, but populate using
126 * anonymous memory (may not be IOVA-contiguous)
127 * - xmem: use externally allocated hugepage memory
129 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
132 * Store specified sockets on which memory pool to be used by ports
135 uint8_t port_numa[RTE_MAX_ETHPORTS];
138 * Store specified sockets on which RX ring to be used by ports
141 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
144 * Store specified sockets on which TX ring to be used by ports
147 uint8_t txring_numa[RTE_MAX_ETHPORTS];
150 * Record the Ethernet address of peer target ports to which packets are
152 * Must be instantiated with the ethernet addresses of peer traffic generator
155 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
156 portid_t nb_peer_eth_addrs = 0;
159 * Probed Target Environment.
161 struct rte_port *ports; /**< For all probed ethernet ports. */
162 portid_t nb_ports; /**< Number of probed ethernet ports. */
163 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
164 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
166 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
169 * Test Forwarding Configuration.
170 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
171 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
173 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
174 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
175 portid_t nb_cfg_ports; /**< Number of configured ports. */
176 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
178 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
179 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
181 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
182 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
185 * Forwarding engines.
187 struct fwd_engine * fwd_engines[] = {
197 &five_tuple_swap_fwd_engine,
198 #ifdef RTE_LIBRTE_IEEE1588
199 &ieee1588_fwd_engine,
205 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
206 uint16_t mempool_flags;
208 struct fwd_config cur_fwd_config;
209 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
210 uint32_t retry_enabled;
211 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
212 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
214 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
215 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
216 DEFAULT_MBUF_DATA_SIZE
217 }; /**< Mbuf data space size. */
218 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
219 * specified on command-line. */
220 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
222 /** Extended statistics to show. */
223 struct rte_eth_xstat_name *xstats_display;
225 unsigned int xstats_display_num; /**< Size of extended statistics to show */
228 * In a container, it is not possible to terminate the process running with the
229 * 'stats-period' option. Set a flag to exit the stats period loop after receiving SIGINT/SIGTERM.
234 * Max Rx frame size, set by '--max-pkt-len' parameter.
236 uint32_t max_rx_pkt_len;
239 * Configuration of packet segments used to scatter received packets
240 * if some of split features is configured.
242 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
243 uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
244 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
245 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
248 * Configuration of packet segments used by the "txonly" processing engine.
250 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
251 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
252 TXONLY_DEF_PACKET_LEN,
254 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
256 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
257 /**< Split policy for packets to TX. */
259 uint8_t txonly_multi_flow;
260 /**< Whether multiple flows are generated in TXONLY mode. */
262 uint32_t tx_pkt_times_inter;
263 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
265 uint32_t tx_pkt_times_intra;
266 /**< Timings for send scheduling in TXONLY mode, time between packets. */
268 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
269 uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
270 int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
271 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
273 /* current configuration is in DCB or not, 0 means it is not in DCB mode */
274 uint8_t dcb_config = 0;
277 * Configurable number of RX/TX queues.
279 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
280 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
281 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
284 * Configurable number of RX/TX ring descriptors.
285 * Defaults are supplied by drivers via ethdev.
287 #define RTE_TEST_RX_DESC_DEFAULT 0
288 #define RTE_TEST_TX_DESC_DEFAULT 0
289 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
290 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
292 #define RTE_PMD_PARAM_UNSET -1
294 * Configurable values of RX and TX ring threshold registers.
297 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
298 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
299 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
301 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
302 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
303 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
306 * Configurable value of RX free threshold.
308 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
311 * Configurable value of RX drop enable.
313 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
316 * Configurable value of TX free threshold.
318 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
321 * Configurable value of TX RS bit threshold.
323 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
326 * Configurable value of buffered packets before sending.
328 uint16_t noisy_tx_sw_bufsz;
331 * Configurable value of packet buffer timeout.
333 uint16_t noisy_tx_sw_buf_flush_time;
336 * Configurable value for size of VNF internal memory area
337 * used for simulating noisy neighbour behaviour
339 uint64_t noisy_lkup_mem_sz;
342 * Configurable value of number of random writes done in
343 * VNF simulation memory area.
345 uint64_t noisy_lkup_num_writes;
348 * Configurable value of number of random reads done in
349 * VNF simulation memory area.
351 uint64_t noisy_lkup_num_reads;
354 * Configurable value of number of random reads/writes done in
355 * VNF simulation memory area.
357 uint64_t noisy_lkup_num_reads_writes;
360 * Receive Side Scaling (RSS) configuration.
362 uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
365 * Port topology configuration
367 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
370 * Avoid flushing all the RX streams before starting forwarding.
372 uint8_t no_flush_rx = 0; /* flush by default */
375 * Flow API isolated mode.
377 uint8_t flow_isolate_all;
380 * Avoid checking the link status when starting/stopping a port.
382 uint8_t no_link_check = 0; /* check by default */
385 * Don't automatically start all ports in interactive mode.
387 uint8_t no_device_start = 0;
390 * Enable link status change notification
392 uint8_t lsc_interrupt = 1; /* enabled by default */
395 * Enable device removal notification.
397 uint8_t rmv_interrupt = 1; /* enabled by default */
399 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
401 /* After attach, port setup is called on event or by iterator */
402 bool setup_on_probe_event = true;
404 /* Clear ptypes on port initialization. */
405 uint8_t clear_ptypes = true;
407 /* Hairpin ports configuration mode. */
408 uint16_t hairpin_mode;
410 /* Pretty printing of ethdev events */
411 static const char * const eth_event_desc[] = {
412 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
413 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
414 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
415 [RTE_ETH_EVENT_INTR_RESET] = "reset",
416 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
417 [RTE_ETH_EVENT_IPSEC] = "IPsec",
418 [RTE_ETH_EVENT_MACSEC] = "MACsec",
419 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
420 [RTE_ETH_EVENT_NEW] = "device probed",
421 [RTE_ETH_EVENT_DESTROY] = "device released",
422 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
423 [RTE_ETH_EVENT_MAX] = NULL,
427 * Display or mask ether events
428 * Default to all events except VF_MBOX
430 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
431 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
432 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
433 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
434 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
435 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
436 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
437 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
439 * Decide if all memory is locked for performance.
444 * NIC bypass mode configuration options.
447 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
448 /* The NIC bypass watchdog timeout. */
449 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
453 #ifdef RTE_LIB_LATENCYSTATS
456 * Set when latency stats is enabled in the commandline
458 uint8_t latencystats_enabled;
461 * Lcore ID to service latency statistics.
463 lcoreid_t latencystats_lcore_id = -1;
468 * Ethernet device configuration.
470 struct rte_eth_rxmode rx_mode;
472 struct rte_eth_txmode tx_mode = {
473 .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
476 struct rte_eth_fdir_conf fdir_conf = {
477 .mode = RTE_FDIR_MODE_NONE,
478 .pballoc = RTE_ETH_FDIR_PBALLOC_64K,
479 .status = RTE_FDIR_REPORT_STATUS,
481 .vlan_tci_mask = 0xFFEF,
483 .src_ip = 0xFFFFFFFF,
484 .dst_ip = 0xFFFFFFFF,
487 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
488 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
490 .src_port_mask = 0xFFFF,
491 .dst_port_mask = 0xFFFF,
492 .mac_addr_byte_mask = 0xFF,
493 .tunnel_type_mask = 1,
494 .tunnel_id_mask = 0xFFFFFFFF,
499 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
502 * Display zero values by default for xstats
504 uint8_t xstats_hide_zero;
507 * Measure of CPU cycles disabled by default
509 uint8_t record_core_cycles;
512 * Display of RX and TX bursts disabled by default
514 uint8_t record_burst_stats;
517 * Number of ports per shared Rx queue group, 0 to disable.
521 unsigned int num_sockets = 0;
522 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
524 #ifdef RTE_LIB_BITRATESTATS
525 /* Bitrate statistics */
526 struct rte_stats_bitrates *bitrate_data;
527 lcoreid_t bitrate_lcore_id;
528 uint8_t bitrate_enabled;
532 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
533 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
537 * hexadecimal bitmask of RX mq mode can be enabled.
539 enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
542 * Used to set forced link speed
544 uint32_t eth_link_speed;
547 * ID of the current process in multi-process, used to
548 * configure the queues to be polled.
553 * Number of processes in multi-process, used to
554 * configure the queues to be polled.
556 unsigned int num_procs = 1;
559 eth_rx_metadata_negotiate_mp(uint16_t port_id)
561 uint64_t rx_meta_features = 0;
564 if (!is_proc_primary())
567 rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
568 rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
569 rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
571 ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
573 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
574 TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
578 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
579 TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
583 if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
584 TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
587 } else if (ret != -ENOTSUP) {
588 rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
589 port_id, rte_strerror(-ret));
594 eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
595 const struct rte_eth_conf *dev_conf)
597 if (is_proc_primary())
598 return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
604 change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
608 portid_t slave_pids[RTE_MAX_ETHPORTS];
609 struct rte_port *port;
614 num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
616 if (num_slaves < 0) {
617 fprintf(stderr, "Failed to get slave list for port = %u\n",
622 for (i = 0; i < num_slaves; i++) {
623 slave_pid = slave_pids[i];
624 port = &ports[slave_pid];
626 is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
629 RTE_SET_USED(bond_pid);
630 RTE_SET_USED(is_stop);
636 eth_dev_start_mp(uint16_t port_id)
640 if (is_proc_primary()) {
641 ret = rte_eth_dev_start(port_id);
645 struct rte_port *port = &ports[port_id];
648 * Starting a bonded port also starts all slaves under the bonded
649 * device. So if this port is bond device, we need to modify the
650 * port status of these slaves.
652 if (port->bond_flag == 1)
653 return change_bonding_slave_port_status(port_id, false);
660 eth_dev_stop_mp(uint16_t port_id)
664 if (is_proc_primary()) {
665 ret = rte_eth_dev_stop(port_id);
669 struct rte_port *port = &ports[port_id];
672 * Stopping a bonded port also stops all slaves under the bonded
673 * device. So if this port is bond device, we need to modify the
674 * port status of these slaves.
676 if (port->bond_flag == 1)
677 return change_bonding_slave_port_status(port_id, true);
684 mempool_free_mp(struct rte_mempool *mp)
686 if (is_proc_primary())
687 rte_mempool_free(mp);
691 eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
693 if (is_proc_primary())
694 return rte_eth_dev_set_mtu(port_id, mtu);
699 /* Forward function declarations */
700 static void setup_attached_port(portid_t pi);
701 static void check_all_ports_link_status(uint32_t port_mask);
702 static int eth_event_callback(portid_t port_id,
703 enum rte_eth_event_type type,
704 void *param, void *ret_param);
705 static void dev_event_callback(const char *device_name,
706 enum rte_dev_event_type type,
708 static void fill_xstats_display_info(void);
711 * Check if all the ports are started.
712 * If yes, return positive value. If not, return zero.
714 static int all_ports_started(void);
717 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
718 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
721 /* Holds the registered mbuf dynamic flags names. */
722 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
726 * Helper function to check if socket is already discovered.
727 * If yes, return positive value. If not, return zero.
730 new_socket_id(unsigned int socket_id)
734 for (i = 0; i < num_sockets; i++) {
735 if (socket_ids[i] == socket_id)
742 * Setup default configuration.
745 set_default_fwd_lcores_config(void)
749 unsigned int sock_num;
752 for (i = 0; i < RTE_MAX_LCORE; i++) {
753 if (!rte_lcore_is_enabled(i))
755 sock_num = rte_lcore_to_socket_id(i);
756 if (new_socket_id(sock_num)) {
757 if (num_sockets >= RTE_MAX_NUMA_NODES) {
758 rte_exit(EXIT_FAILURE,
759 "Total sockets greater than %u\n",
762 socket_ids[num_sockets++] = sock_num;
764 if (i == rte_get_main_lcore())
766 fwd_lcores_cpuids[nb_lc++] = i;
768 nb_lcores = (lcoreid_t) nb_lc;
769 nb_cfg_lcores = nb_lcores;
774 set_def_peer_eth_addrs(void)
778 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
779 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
780 peer_eth_addrs[i].addr_bytes[5] = i;
785 set_default_fwd_ports_config(void)
790 RTE_ETH_FOREACH_DEV(pt_id) {
791 fwd_ports_ids[i++] = pt_id;
793 /* Update sockets info according to the attached device */
794 int socket_id = rte_eth_dev_socket_id(pt_id);
795 if (socket_id >= 0 && new_socket_id(socket_id)) {
796 if (num_sockets >= RTE_MAX_NUMA_NODES) {
797 rte_exit(EXIT_FAILURE,
798 "Total sockets greater than %u\n",
801 socket_ids[num_sockets++] = socket_id;
805 nb_cfg_ports = nb_ports;
806 nb_fwd_ports = nb_ports;
810 set_def_fwd_config(void)
812 set_default_fwd_lcores_config();
813 set_def_peer_eth_addrs();
814 set_default_fwd_ports_config();
817 #ifndef RTE_EXEC_ENV_WINDOWS
818 /* extremely pessimistic estimation of memory required to create a mempool */
820 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
822 unsigned int n_pages, mbuf_per_pg, leftover;
823 uint64_t total_mem, mbuf_mem, obj_sz;
825 /* there is no good way to predict how much space the mempool will
826 * occupy because it will allocate chunks on the fly, and some of those
827 * will come from default DPDK memory while some will come from our
828 * external memory, so just assume 128MB will be enough for everyone.
830 uint64_t hdr_mem = 128 << 20;
832 /* account for possible non-contiguousness */
833 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
835 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
839 mbuf_per_pg = pgsz / obj_sz;
840 leftover = (nb_mbufs % mbuf_per_pg) > 0;
841 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
843 mbuf_mem = n_pages * pgsz;
845 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
847 if (total_mem > SIZE_MAX) {
848 TESTPMD_LOG(ERR, "Memory size too big\n");
851 *out = (size_t)total_mem;
857 pagesz_flags(uint64_t page_sz)
859 /* as per mmap() manpage, all page sizes are log2 of page size
860 * shifted by MAP_HUGE_SHIFT
862 int log2 = rte_log2_u64(page_sz);
864 return (log2 << HUGE_SHIFT);
868 alloc_mem(size_t memsz, size_t pgsz, bool huge)
873 /* allocate anonymous hugepages */
874 flags = MAP_ANONYMOUS | MAP_PRIVATE;
876 flags |= HUGE_FLAG | pagesz_flags(pgsz);
878 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
879 if (addr == MAP_FAILED)
885 struct extmem_param {
889 rte_iova_t *iova_table;
890 unsigned int iova_table_len;
894 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
897 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
898 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
899 unsigned int cur_page, n_pages, pgsz_idx;
900 size_t mem_sz, cur_pgsz;
901 rte_iova_t *iovas = NULL;
905 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
906 /* skip anything that is too big */
907 if (pgsizes[pgsz_idx] > SIZE_MAX)
910 cur_pgsz = pgsizes[pgsz_idx];
912 /* if we were told not to allocate hugepages, override */
914 cur_pgsz = sysconf(_SC_PAGESIZE);
916 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
918 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
922 /* allocate our memory */
923 addr = alloc_mem(mem_sz, cur_pgsz, huge);
925 /* if we couldn't allocate memory with a specified page size,
926 * that doesn't mean we can't do it with other page sizes, so
932 /* store IOVA addresses for every page in this memory area */
933 n_pages = mem_sz / cur_pgsz;
935 iovas = malloc(sizeof(*iovas) * n_pages);
938 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
941 /* lock memory if it's not huge pages */
945 /* populate IOVA addresses */
946 for (cur_page = 0; cur_page < n_pages; cur_page++) {
951 offset = cur_pgsz * cur_page;
952 cur = RTE_PTR_ADD(addr, offset);
954 /* touch the page before getting its IOVA */
955 *(volatile char *)cur = 0;
957 iova = rte_mem_virt2iova(cur);
959 iovas[cur_page] = iova;
964 /* if we couldn't allocate anything */
970 param->pgsz = cur_pgsz;
971 param->iova_table = iovas;
972 param->iova_table_len = n_pages;
978 munmap(addr, mem_sz);
984 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
986 struct extmem_param param;
989 memset(&param, 0, sizeof(param));
991 /* check if our heap exists */
992 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
994 /* create our heap */
995 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
997 TESTPMD_LOG(ERR, "Cannot create heap\n");
1002 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
1004 TESTPMD_LOG(ERR, "Cannot create memory area\n");
1008 /* we now have a valid memory area, so add it to heap */
1009 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
1010 param.addr, param.len, param.iova_table,
1011 param.iova_table_len, param.pgsz);
1013 /* when using VFIO, memory is automatically mapped for DMA by EAL */
1015 /* not needed any more */
1016 free(param.iova_table);
1019 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
1020 munmap(param.addr, param.len);
1026 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
1032 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
1033 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
1038 RTE_ETH_FOREACH_DEV(pid) {
1039 struct rte_eth_dev_info dev_info;
1041 ret = eth_dev_info_get_print_err(pid, &dev_info);
1044 "unable to get device info for port %d on addr 0x%p,"
1045 "mempool unmapping will not be performed\n",
1050 ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
1053 "unable to DMA unmap addr 0x%p "
1055 memhdr->addr, dev_info.device->name);
1058 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
1061 "unable to un-register addr 0x%p\n", memhdr->addr);
1066 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
1067 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
1070 size_t page_size = sysconf(_SC_PAGESIZE);
1073 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
1077 "unable to register addr 0x%p\n", memhdr->addr);
1080 RTE_ETH_FOREACH_DEV(pid) {
1081 struct rte_eth_dev_info dev_info;
1083 ret = eth_dev_info_get_print_err(pid, &dev_info);
1086 "unable to get device info for port %d on addr 0x%p,"
1087 "mempool mapping will not be performed\n",
1091 ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
1094 "unable to DMA map addr 0x%p "
1096 memhdr->addr, dev_info.device->name);
1103 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
1104 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
1106 struct rte_pktmbuf_extmem *xmem;
1107 unsigned int ext_num, zone_num, elt_num;
1110 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
1111 elt_num = EXTBUF_ZONE_SIZE / elt_size;
1112 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
1114 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
1116 TESTPMD_LOG(ERR, "Cannot allocate memory for "
1117 "external buffer descriptors\n");
1121 for (ext_num = 0; ext_num < zone_num; ext_num++) {
1122 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
1123 const struct rte_memzone *mz;
1124 char mz_name[RTE_MEMZONE_NAMESIZE];
1127 ret = snprintf(mz_name, sizeof(mz_name),
1128 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
1129 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
1130 errno = ENAMETOOLONG;
1134 mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
1136 RTE_MEMZONE_IOVA_CONTIG |
1138 RTE_MEMZONE_SIZE_HINT_ONLY);
1141 * The caller exits on external buffer creation
1142 * error, so there is no need to free memzones.
1148 xseg->buf_ptr = mz->addr;
1149 xseg->buf_iova = mz->iova;
1150 xseg->buf_len = EXTBUF_ZONE_SIZE;
1151 xseg->elt_size = elt_size;
1153 if (ext_num == 0 && xmem != NULL) {
1162 * Configuration initialisation done once at init time.
1164 static struct rte_mempool *
1165 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
1166 unsigned int socket_id, uint16_t size_idx)
1168 char pool_name[RTE_MEMPOOL_NAMESIZE];
1169 struct rte_mempool *rte_mp = NULL;
1170 #ifndef RTE_EXEC_ENV_WINDOWS
1173 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1175 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1176 if (!is_proc_primary()) {
1177 rte_mp = rte_mempool_lookup(pool_name);
1179 rte_exit(EXIT_FAILURE,
1180 "Get mbuf pool for socket %u failed: %s\n",
1181 socket_id, rte_strerror(rte_errno));
1186 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1187 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1189 switch (mp_alloc_type) {
1190 case MP_ALLOC_NATIVE:
1192 /* wrapper to rte_mempool_create() */
1193 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1194 rte_mbuf_best_mempool_ops());
1195 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1196 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1199 #ifndef RTE_EXEC_ENV_WINDOWS
1202 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1203 mb_size, (unsigned int) mb_mempool_cache,
1204 sizeof(struct rte_pktmbuf_pool_private),
1205 socket_id, mempool_flags);
1209 if (rte_mempool_populate_anon(rte_mp) == 0) {
1210 rte_mempool_free(rte_mp);
1214 rte_pktmbuf_pool_init(rte_mp, NULL);
1215 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1216 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1220 case MP_ALLOC_XMEM_HUGE:
1223 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1225 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1226 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1229 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1230 if (heap_socket < 0)
1231 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1233 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1234 rte_mbuf_best_mempool_ops());
1235 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1236 mb_mempool_cache, 0, mbuf_seg_size,
1243 struct rte_pktmbuf_extmem *ext_mem;
1244 unsigned int ext_num;
1246 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1247 socket_id, pool_name, &ext_mem);
1249 rte_exit(EXIT_FAILURE,
1250 "Can't create pinned data buffers\n");
1252 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1253 rte_mbuf_best_mempool_ops());
1254 rte_mp = rte_pktmbuf_pool_create_extbuf
1255 (pool_name, nb_mbuf, mb_mempool_cache,
1256 0, mbuf_seg_size, socket_id,
1263 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1267 #ifndef RTE_EXEC_ENV_WINDOWS
1270 if (rte_mp == NULL) {
1271 rte_exit(EXIT_FAILURE,
1272 "Creation of mbuf pool for socket %u failed: %s\n",
1273 socket_id, rte_strerror(rte_errno));
1274 } else if (verbose_level > 0) {
1275 rte_mempool_dump(stdout, rte_mp);
1281 * Check given socket id is valid or not with NUMA mode,
1282 * if valid, return 0, else return -1
1285 check_socket_id(const unsigned int socket_id)
1287 static int warning_once = 0;
1289 if (new_socket_id(socket_id)) {
1290 if (!warning_once && numa_support)
1292 "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
1300 * Get the allowed maximum number of RX queues.
1301 * *pid return the port id which has minimal value of
1302 * max_rx_queues in all ports.
1305 get_allowed_max_nb_rxq(portid_t *pid)
1307 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1308 bool max_rxq_valid = false;
1310 struct rte_eth_dev_info dev_info;
1312 RTE_ETH_FOREACH_DEV(pi) {
1313 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1316 max_rxq_valid = true;
1317 if (dev_info.max_rx_queues < allowed_max_rxq) {
1318 allowed_max_rxq = dev_info.max_rx_queues;
1322 return max_rxq_valid ? allowed_max_rxq : 0;
1326 * Check input rxq is valid or not.
1327 * If input rxq is not greater than any of maximum number
1328 * of RX queues of all ports, it is valid.
1329 * if valid, return 0, else return -1
1332 check_nb_rxq(queueid_t rxq)
1334 queueid_t allowed_max_rxq;
1337 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1338 if (rxq > allowed_max_rxq) {
1340 "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
1341 rxq, allowed_max_rxq, pid);
1348 * Get the allowed maximum number of TX queues.
1349 * *pid return the port id which has minimal value of
1350 * max_tx_queues in all ports.
1353 get_allowed_max_nb_txq(portid_t *pid)
1355 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1356 bool max_txq_valid = false;
1358 struct rte_eth_dev_info dev_info;
1360 RTE_ETH_FOREACH_DEV(pi) {
1361 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1364 max_txq_valid = true;
1365 if (dev_info.max_tx_queues < allowed_max_txq) {
1366 allowed_max_txq = dev_info.max_tx_queues;
1370 return max_txq_valid ? allowed_max_txq : 0;
1374 * Check input txq is valid or not.
1375 * If input txq is not greater than any of maximum number
1376 * of TX queues of all ports, it is valid.
1377 * if valid, return 0, else return -1
1380 check_nb_txq(queueid_t txq)
1382 queueid_t allowed_max_txq;
1385 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1386 if (txq > allowed_max_txq) {
1388 "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
1389 txq, allowed_max_txq, pid);
1396 * Get the allowed maximum number of RXDs of every rx queue.
1397 * *pid return the port id which has minimal value of
1398 * max_rxd in all queues of all ports.
1401 get_allowed_max_nb_rxd(portid_t *pid)
1403 uint16_t allowed_max_rxd = UINT16_MAX;
1405 struct rte_eth_dev_info dev_info;
1407 RTE_ETH_FOREACH_DEV(pi) {
1408 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1411 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1412 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1416 return allowed_max_rxd;
1420 * Get the allowed minimal number of RXDs of every rx queue.
1421 * *pid return the port id which has minimal value of
1422 * min_rxd in all queues of all ports.
1425 get_allowed_min_nb_rxd(portid_t *pid)
1427 uint16_t allowed_min_rxd = 0;
1429 struct rte_eth_dev_info dev_info;
1431 RTE_ETH_FOREACH_DEV(pi) {
1432 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1435 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1436 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1441 return allowed_min_rxd;
1445 * Check input rxd is valid or not.
1446 * If input rxd is not greater than any of maximum number
1447 * of RXDs of every Rx queues and is not less than any of
1448 * minimal number of RXDs of every Rx queues, it is valid.
1449 * if valid, return 0, else return -1
1452 check_nb_rxd(queueid_t rxd)
1454 uint16_t allowed_max_rxd;
1455 uint16_t allowed_min_rxd;
1458 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1459 if (rxd > allowed_max_rxd) {
1461 "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
1462 rxd, allowed_max_rxd, pid);
1466 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1467 if (rxd < allowed_min_rxd) {
1469 "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
1470 rxd, allowed_min_rxd, pid);
1478 * Get the allowed maximum number of TXDs of every rx queues.
1479 * *pid return the port id which has minimal value of
1480 * max_txd in every tx queue.
1483 get_allowed_max_nb_txd(portid_t *pid)
1485 uint16_t allowed_max_txd = UINT16_MAX;
1487 struct rte_eth_dev_info dev_info;
1489 RTE_ETH_FOREACH_DEV(pi) {
1490 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1493 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1494 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1498 return allowed_max_txd;
1502 * Get the allowed maximum number of TXDs of every tx queues.
1503 * *pid return the port id which has minimal value of
1504 * min_txd in every tx queue.
1507 get_allowed_min_nb_txd(portid_t *pid)
1509 uint16_t allowed_min_txd = 0;
1511 struct rte_eth_dev_info dev_info;
1513 RTE_ETH_FOREACH_DEV(pi) {
1514 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1517 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1518 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1523 return allowed_min_txd;
1527 * Check input txd is valid or not.
1528 * If input txd is not greater than any of maximum number
1529 * of TXDs of every Rx queues, it is valid.
1530 * if valid, return 0, else return -1
1533 check_nb_txd(queueid_t txd)
1535 uint16_t allowed_max_txd;
1536 uint16_t allowed_min_txd;
1539 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1540 if (txd > allowed_max_txd) {
1542 "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
1543 txd, allowed_max_txd, pid);
1547 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1548 if (txd < allowed_min_txd) {
1550 "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
1551 txd, allowed_min_txd, pid);
1559 * Get the allowed maximum number of hairpin queues.
1560 * *pid return the port id which has minimal value of
1561 * max_hairpin_queues in all ports.
1564 get_allowed_max_nb_hairpinq(portid_t *pid)
1566 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1568 struct rte_eth_hairpin_cap cap;
1570 RTE_ETH_FOREACH_DEV(pi) {
1571 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1575 if (cap.max_nb_queues < allowed_max_hairpinq) {
1576 allowed_max_hairpinq = cap.max_nb_queues;
1580 return allowed_max_hairpinq;
1584 * Check input hairpin is valid or not.
1585 * If input hairpin is not greater than any of maximum number
1586 * of hairpin queues of all ports, it is valid.
1587 * if valid, return 0, else return -1
1590 check_nb_hairpinq(queueid_t hairpinq)
1592 queueid_t allowed_max_hairpinq;
1595 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1596 if (hairpinq > allowed_max_hairpinq) {
1598 "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
1599 hairpinq, allowed_max_hairpinq, pid);
1606 get_eth_overhead(struct rte_eth_dev_info *dev_info)
1608 uint32_t eth_overhead;
1610 if (dev_info->max_mtu != UINT16_MAX &&
1611 dev_info->max_rx_pktlen > dev_info->max_mtu)
1612 eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
1614 eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1616 return eth_overhead;
1620 init_config_port_offloads(portid_t pid, uint32_t socket_id)
1622 struct rte_port *port = &ports[pid];
1626 eth_rx_metadata_negotiate_mp(pid);
1628 port->dev_conf.txmode = tx_mode;
1629 port->dev_conf.rxmode = rx_mode;
1631 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1633 rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1635 if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
1636 port->dev_conf.txmode.offloads &=
1637 ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1639 /* Apply Rx offloads configuration */
1640 for (i = 0; i < port->dev_info.max_rx_queues; i++)
1641 port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
1642 /* Apply Tx offloads configuration */
1643 for (i = 0; i < port->dev_info.max_tx_queues; i++)
1644 port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
1647 port->dev_conf.link_speeds = eth_link_speed;
1650 port->dev_conf.rxmode.mtu = max_rx_pkt_len -
1651 get_eth_overhead(&port->dev_info);
1653 /* set flag to initialize port/queue */
1654 port->need_reconfig = 1;
1655 port->need_reconfig_queues = 1;
1656 port->socket_id = socket_id;
1657 port->tx_metadata = 0;
1660 * Check for maximum number of segments per MTU.
1661 * Accordingly update the mbuf data size.
1663 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1664 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1665 uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
1668 if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
1669 uint16_t data_size = (mtu + eth_overhead) /
1670 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1671 uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
1673 if (buffer_size > mbuf_data_size[0]) {
1674 mbuf_data_size[0] = buffer_size;
1675 TESTPMD_LOG(WARNING,
1676 "Configured mbuf size of the first segment %hu\n",
	/*
	 * NOTE(review): this capture of init_config() is elided; lines missing
	 * from the extract are not reconstructed here.
	 */
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	struct rte_gro_param gro_param;	/* per-lcore GRO context parameters */

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
		fwd_lcores[lc_id]->cpuid_idx = lc_id;

	RTE_ETH_FOREACH_DEV(pid) {
		/* NUMA path: user-set socket, else the device's own socket. */
		socket_id = port_numa[pid];
		if (port_numa[pid] == NUMA_NO_CONFIG) {
			socket_id = rte_eth_dev_socket_id(pid);

			/*
			 * if socket_id is invalid,
			 * set to the first available socket.
			 */
			if (check_socket_id(socket_id) < 0)
				socket_id = socket_ids[0];
		/* non-NUMA path: single socket (socket_num or 0). */
		socket_id = (socket_num == UMA_NO_CONFIG) ?
		/* Apply default TxRx configuration for all ports */
		init_config_port_offloads(pid, socket_id);

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
		/* Worst-case per-port mbuf demand, scaled by all ports. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;

	/* NUMA: one pool per (socket, mbuf size) pair. */
	for (i = 0; i < num_sockets; i++)
		for (j = 0; j < mbuf_data_size_n; j++)
			mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
				mbuf_pool_create(mbuf_data_size[j],
	/* non-NUMA: one pool per mbuf size on the chosen socket. */
	for (i = 0; i < mbuf_data_size_n; i++)
		mempools[i] = mbuf_pool_create
			socket_num == UMA_NO_CONFIG ?

	gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
		/* Fall back to the socket-0 pool when the local one is absent. */
			mbp = mbuf_pool_find(0, 0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		/* presumably max frame size minus L2 overhead -- capture elided */
		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
		fwd_lcores[lc_id]->gso_ctx.flag = 0;

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
1824 reconfig(portid_t new_port_id, unsigned socket_id)
1826 /* Reconfiguration of Ethernet ports. */
1827 init_config_port_offloads(new_port_id, socket_id);
/*
 * (Re)allocate the forwarding stream array: one stream per (port, queue)
 * pair.  Validates nb_rxq/nb_txq against every port's limits and fixes up
 * per-port socket ids first.
 * NOTE(review): this capture of the function is elided; missing lines are
 * not reconstructed here.
 */
init_fwd_streams(void)
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		/* Reject a global queue count larger than this port supports. */
		if (nb_rxq > port->dev_info.max_rx_queues) {
			"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
				nb_rxq, port->dev_info.max_rx_queues);
		if (nb_txq > port->dev_info.max_tx_queues) {
			"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
				nb_txq, port->dev_info.max_tx_queues);

		/* NUMA: user-set socket wins, else the device's socket. */
		if (port_numa[pid] != NUMA_NO_CONFIG)
			port->socket_id = port_numa[pid];
			port->socket_id = rte_eth_dev_socket_id(pid);

		/*
		 * if socket_id is invalid,
		 * set to the first available socket.
		 */
		if (check_socket_id(port->socket_id) < 0)
			port->socket_id = socket_ids[0];

		/* non-NUMA: everything on socket_num (or 0 by default). */
		if (socket_num == UMA_NO_CONFIG)
			port->socket_id = 0;
			port->socket_id = socket_num;

	/* Streams must cover the larger of the Rx and Tx queue counts. */
	q = RTE_MAX(nb_rxq, nb_txq);
		"Fail: Cannot allocate fwd streams as number of queues is 0\n");
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* Same geometry as before: keep the existing array. */
	if (nb_fwd_streams_new == nb_fwd_streams)

	/* Release the previous stream array before re-allocating. */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		rte_free(fwd_streams);

	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					"(struct fwd_stream) failed\n");
/*
 * Display burst-size statistics for one direction (rx_tx is "RX" or "TX"):
 * total burst count plus the share of the most frequent burst sizes.
 * Slot 0 always reports zero-sized bursts; slots 1-2 hold the top two
 * non-zero burst sizes; slot 3 acts as the "other" remainder.
 * NOTE(review): this capture is elided; missing lines are not
 * reconstructed here.
 */
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
	uint64_t total_burst, sburst;
	uint64_t burst_stats[4];	/* occurrence counts per reported slot */
	uint16_t pktnb_stats[4];	/* burst size matching each slot */
	int burst_percent[4], sburstp;

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	memset(&burst_stats, 0x0, sizeof(burst_stats));
	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));

	/* Show stats for 0 burst size always */
	total_burst = pbs->pkt_burst_spread[0];
	burst_stats[0] = pbs->pkt_burst_spread[0];

	/* Find the next 2 burst sizes with highest occurrences. */
	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		total_burst += nb_burst;

		/* Keep burst_stats[1] >= burst_stats[2] (top-2 selection). */
		if (nb_burst > burst_stats[1]) {
			burst_stats[2] = burst_stats[1];
			pktnb_stats[2] = pktnb_stats[1];
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		} else if (nb_burst > burst_stats[2]) {
			burst_stats[2] = nb_burst;
			pktnb_stats[2] = nb_pkt;
	/* Nothing recorded: stay silent. */
	if (total_burst == 0)

	printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
		/* Last slot: dump whatever remains as "other". */
			printf("%d%% of other]\n", 100 - sburstp);
		sburst += burst_stats[i];
		if (sburst == total_burst) {
			printf("%d%% of %d pkts]\n",
				100 - sburstp, (int) pktnb_stats[i]);
		/* Truncated percentage of this burst size. */
			(double)burst_stats[i] / total_burst * 100;
		printf("%d%% of %d pkts + ",
			burst_percent[i], (int) pktnb_stats[i]);
		sburstp += burst_percent[i];
/*
 * Print the software counters of one forwarding stream; silent when the
 * stream saw no traffic.  Checksum error counters are only shown for the
 * csum forwarding engine; burst spreads only when recording is enabled.
 * NOTE(review): this capture is elided; missing lines are not
 * reconstructed here.
 */
fwd_stream_stats_display(streamid_t stream_id)
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Idle stream: nothing to report. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
	printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
		"TX Port=%2d/Queue=%2d %s\n",
		fwd_top_stats_border, fs->rx_port, fs->rx_queue,
		fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
		" TX-dropped: %-14"PRIu64,
		fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14"PRIu64
			"  Rx- bad L4 checksum: %-14"PRIu64
			" Rx- bad outer L4 checksum: %-14"PRIu64"\n",
			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
			fs->rx_bad_outer_l4_csum);
		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
			fs->rx_bad_outer_ip_csum);

	if (record_burst_stats) {
		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Print per-port forwarding statistics (deltas relative to the snapshot
 * taken by fwd_stats_reset()), then the totals accumulated over all
 * forwarding ports, and optionally the CPU cycles/packet figure.
 * NOTE(review): this capture is elided; missing lines are not
 * reconstructed here.
 */
fwd_stats_display(void)
	static const char *fwd_stats_border = "----------------------";
	static const char *acc_stats_border = "+++++++++++++++";
	/* Per-port aggregation of the software stream counters. */
	struct fwd_stream *rx_stream;
	struct fwd_stream *tx_stream;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
	uint64_t rx_bad_outer_l4_csum;
	uint64_t rx_bad_outer_ip_csum;
	} ports_stats[RTE_MAX_ETHPORTS];
	uint64_t total_rx_dropped = 0;
	uint64_t total_tx_dropped = 0;
	uint64_t total_rx_nombuf = 0;
	struct rte_eth_stats stats;
	uint64_t fwd_cycles = 0;
	uint64_t total_recv = 0;
	uint64_t total_xmit = 0;
	struct rte_port *port;

	memset(ports_stats, 0, sizeof(ports_stats));

	/* Fold every stream's counters into its Rx/Tx port slots. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports_stats[fs->tx_port].tx_stream = fs;
			ports_stats[fs->rx_port].rx_stream = fs;

		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;

		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
				fs->rx_bad_outer_l4_csum;
		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
				fs->rx_bad_outer_ip_csum;

		if (record_core_cycles)
			fwd_cycles += fs->core_cycles;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];

		ret = rte_eth_stats_get(pt_id, &stats);
				"%s: Error: failed to get stats (port %u): %d",
				__func__, pt_id, ret);
		/* Convert HW counters to deltas since the last reset. */
		stats.ipackets -= port->stats.ipackets;
		stats.opackets -= port->stats.opackets;
		stats.ibytes -= port->stats.ibytes;
		stats.obytes -= port->stats.obytes;
		stats.imissed -= port->stats.imissed;
		stats.oerrors -= port->stats.oerrors;
		stats.rx_nombuf -= port->stats.rx_nombuf;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += ports_stats[pt_id].tx_dropped;
		total_tx_dropped += stats.oerrors;
		total_rx_nombuf += stats.rx_nombuf;

		printf("\n %s Forward statistics for port %-2d %s\n",
			fwd_stats_border, pt_id, fwd_stats_border);

		printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
		       stats.ipackets + stats.imissed);

		/* Checksum error counters only exist in csum mode. */
		if (cur_fwd_eng == &csum_fwd_engine) {
			printf("  Bad-ipcsum: %-14"PRIu64
			       " Bad-l4csum: %-14"PRIu64
			       "Bad-outer-l4csum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_ip_csum,
			       ports_stats[pt_id].rx_bad_l4_csum,
			       ports_stats[pt_id].rx_bad_outer_l4_csum);
			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
			       ports_stats[pt_id].rx_bad_outer_ip_csum);
		if (stats.ierrors + stats.rx_nombuf > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);

		printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats.opackets, ports_stats[pt_id].tx_dropped,
		       stats.opackets + ports_stats[pt_id].tx_dropped);

		if (record_burst_stats) {
			if (ports_stats[pt_id].rx_stream)
				pkt_burst_stats_display("RX",
					&ports_stats[pt_id].rx_stream->rx_burst_stats);
			if (ports_stats[pt_id].tx_stream)
				pkt_burst_stats_display("TX",
					&ports_stats[pt_id].tx_stream->tx_burst_stats);

		printf("  %s--------------------------------%s\n",
		       fwd_stats_border, fwd_stats_border);

	printf("\n  %s Accumulated forward statistics for all ports"
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       acc_stats_border, acc_stats_border);
	if (record_core_cycles) {
#define CYC_PER_MHZ 1E6
		if (total_recv > 0 || total_xmit > 0) {
			uint64_t total_pkts = 0;
			/* Pure generators are measured on Tx, others on Rx. */
			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
				total_pkts = total_xmit;
				total_pkts = total_recv;

			printf("\n  CPU cycles/packet=%.2F (total cycles="
			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
			       (double) fwd_cycles / total_pkts,
			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
/*
 * Snapshot the current HW stats of every forwarding port into
 * ports[].stats (fwd_stats_display() later shows deltas against this
 * baseline) and zero all per-stream software counters.
 * NOTE(review): this capture is elided; missing lines are not
 * reconstructed here.
 */
fwd_stats_reset(void)
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		/* Baseline snapshot, not an actual HW counter clear. */
		ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats);
			"%s: Error: failed to clear stats (port %u):%d",
			__func__, pt_id, ret);
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs = fwd_streams[sm_id];

		fs->fwd_dropped = 0;
		fs->rx_bad_ip_csum = 0;
		fs->rx_bad_l4_csum = 0;
		fs->rx_bad_outer_l4_csum = 0;
		fs->rx_bad_outer_ip_csum = 0;

		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
		fs->core_cycles = 0;
/*
 * Drain packets left over in the forwarding Rx queues so that a new run
 * starts clean.  Makes two passes over all (port, queue) pairs; each
 * queue is polled until empty or until a 1 second timeout expires.
 * NOTE(review): this capture is elided; missing lines are not
 * reconstructed here.
 */
flush_fwd_rx_queues(void)
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* Secondary processes must not drain queues owned by the primary. */
	if (num_procs > 1) {
		printf("multi-process not support for flushing fwd Rx queues, skip the below lines and return.\n");

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];

				/* Polling stopped queues is prohibited. */
				if (ports[port_id].rxq[rxq].state ==
				    RTE_ETH_QUEUE_STATE_STOPPED)

				/**
				 * testpmd can stuck in the below do while loop
				 * if rte_eth_rx_burst() always returns nonzero
				 * packets. So timer is added to exit this loop
				 * after 1sec timer expiry.
				 */
				prev_tsc = rte_rdtsc();
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Forwarding loop executed on one lcore: repeatedly invoke pkt_fwd on
 * each enabled stream assigned to this lcore until fc->stopped is set.
 * The designated lcores also drive the periodic bitrate calculation and
 * the latency-stats update when those libraries are enabled.
 * NOTE(review): this capture is elided; missing lines are not
 * reconstructed here.
 */
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
	struct fwd_stream **fsm;
#ifdef RTE_LIB_BITRATESTATS
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
		/* One forwarding pass over every enabled stream. */
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			if (!fsm[sm_id]->disabled)
				(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIB_BITRATESTATS
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
				tics_datum = tics_current;
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
	} while (! fc->stopped);
2311 start_pkt_forward_on_core(void *fwd_arg)
2313 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2314 cur_fwd_config.fwd_eng->packet_fwd);
2319 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2320 * Used to start communication flows in network loopback test configurations.
2323 run_one_txonly_burst_on_core(void *fwd_arg)
2325 struct fwd_lcore *fwd_lc;
2326 struct fwd_lcore tmp_lcore;
2328 fwd_lc = (struct fwd_lcore *) fwd_arg;
2329 tmp_lcore = *fwd_lc;
2330 tmp_lcore.stopped = 1;
2331 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2336 * Launch packet forwarding:
2337 * - Setup per-port forwarding context.
2338 * - launch logical cores with their forwarding configuration.
2341 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2347 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2348 lc_id = fwd_lcores_cpuids[i];
2349 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2350 fwd_lcores[i]->stopped = 0;
2351 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2352 fwd_lcores[i], lc_id);
2355 "launch lcore %u failed - diag=%d\n",
/*
 * Launch packet forwarding configuration: validate the queue counts for
 * the selected engine, flush stale Rx packets, optionally send an
 * initial TXONLY burst, then launch the per-lcore forwarding loops.
 * NOTE(review): this capture is elided; missing lines are not
 * reconstructed here.
 */
start_packet_forwarding(int with_tx_first)
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	stream_init_t stream_init = cur_fwd_eng->stream_init;

	/* The engine must have queues to work with in its direction(s). */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		fprintf(stderr, "Not all ports were started\n");
	if (test_done == 0) {
		fprintf(stderr, "Packet forwarding already started\n");

	pkt_fwd_config_display(&cur_fwd_config);
	if (!pkt_fwd_shared_rxq_check())

	if (stream_init != NULL)
		for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
			stream_init(fwd_streams[i]);

	/* Give the engine a chance to prepare each port. */
	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			if (port_fwd_begin(fwd_ports_ids[i])) {
					"Packet forwarding is not ready\n");

	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
				if (port_fwd_begin(fwd_ports_ids[i])) {
						"Packet forwarding is not ready\n");

	flush_fwd_rx_queues();

	rxtx_config_display();

	/* Optional initial TXONLY burst(s) before the real engine runs. */
	if (with_tx_first) {
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
	launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop packet forwarding: signal every forwarding lcore to stop, wait
 * for them to finish, let the engine clean up each port, then display
 * the accumulated statistics.
 * NOTE(review): this capture is elided; missing lines are not
 * reconstructed here.
 */
stop_packet_forwarding(void)
	port_fwd_end_t port_fwd_end;

		fprintf(stderr, "Packet forwarding not started\n");
	printf("Telling cores to stop...");
	/* "stopped" is polled by run_pkt_fwd_on_lcore() on each lcore. */
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);

	fwd_stats_display();

	printf("\nDone.\n");
2483 dev_set_link_up(portid_t pid)
2485 if (rte_eth_dev_set_link_up(pid) < 0)
2486 fprintf(stderr, "\nSet link up fail.\n");
2490 dev_set_link_down(portid_t pid)
2492 if (rte_eth_dev_set_link_down(pid) < 0)
2493 fprintf(stderr, "\nSet link down fail.\n");
2497 all_ports_started(void)
2500 struct rte_port *port;
2502 RTE_ETH_FOREACH_DEV(pi) {
2504 /* Check if there is a port which is not started */
2505 if ((port->port_status != RTE_PORT_STARTED) &&
2506 (port->slave_flag == 0))
2510 /* No port is not started */
2515 port_is_stopped(portid_t port_id)
2517 struct rte_port *port = &ports[port_id];
2519 if ((port->port_status != RTE_PORT_STOPPED) &&
2520 (port->slave_flag == 0))
2526 all_ports_stopped(void)
2530 RTE_ETH_FOREACH_DEV(pi) {
2531 if (!port_is_stopped(pi))
2539 port_is_started(portid_t port_id)
2541 if (port_id_is_invalid(port_id, ENABLED_WARN))
2544 if (ports[port_id].port_status != RTE_PORT_STARTED)
/*
 * Configure the Rx and Tx hairpin queues for the selected port.
 * hairpin_mode bits choose the peering scheme: 0 = loop to self,
 * 0x1 = Tx peers with the next port / Rx with the previous (p_pi),
 * 0x2 = pair ports two by two; bit 0x10 requests explicit Tx flow mode.
 * NOTE(review): this capture is elided; missing lines are not
 * reconstructed here.
 */
setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
	struct rte_eth_hairpin_conf hairpin_conf = {
	struct rte_port *port = &ports[pi];
	uint16_t peer_rx_port = pi;
	uint16_t peer_tx_port = pi;
	uint32_t manual = 1;
	uint32_t tx_exp = hairpin_mode & 0x10;

	if (!(hairpin_mode & 0xf)) {
	} else if (hairpin_mode & 0x1) {
		/* Chained mode: Tx peers with the next owned port (wrap to 0). */
		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
				RTE_ETH_DEV_NO_OWNER);
		if (peer_tx_port >= RTE_MAX_ETHPORTS)
			peer_tx_port = rte_eth_find_next_owned_by(0,
					RTE_ETH_DEV_NO_OWNER);
		if (p_pi != RTE_MAX_ETHPORTS) {
			peer_rx_port = p_pi;
			/* Last port will be the peer RX port of the first. */
			RTE_ETH_FOREACH_DEV(next_pi)
				peer_rx_port = next_pi;
	} else if (hairpin_mode & 0x2) {
		/* Pair mode: even/odd ports peer with each other. */
			peer_rx_port = p_pi;
			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
					RTE_ETH_DEV_NO_OWNER);
			if (peer_rx_port >= RTE_MAX_ETHPORTS)
		peer_tx_port = peer_rx_port;

	/* Hairpin Tx queues follow the regular Tx queues (qi >= nb_txq). */
	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = peer_rx_port;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);

		/* Fail to setup rx queue, return */
		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
				"Port %d can not be set back to stopped\n", pi);
		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
	/* Hairpin Rx queues follow the regular Rx queues (qi >= nb_rxq). */
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = peer_tx_port;
		hairpin_conf.peers[0].queue = i + nb_txq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);

		/* Fail to setup rx queue, return */
		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
				"Port %d can not be set back to stopped\n", pi);
		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
/* Configure the Rx with optional split. */
/*
 * Wrapper around rte_eth_rx_queue_setup() that builds the rx_seg[]
 * descriptor array when buffer split is requested (rx_pkt_nb_segs > 1
 * and the BUFFER_SPLIT offload is set); otherwise performs a plain
 * single-pool queue setup.  Also records the queue's initial state.
 * NOTE(review): this capture is elided; missing lines are not
 * reconstructed here.
 */
rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
	       uint16_t nb_rx_desc, unsigned int socket_id,
	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
	unsigned int i, mp_n;

	/* Fast path: no split requested or not supported -> single pool. */
	if (rx_pkt_nb_segs <= 1 ||
	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
		rx_conf->rx_seg = NULL;
		rx_conf->rx_nseg = 0;
		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
					     nb_rx_desc, socket_id,
	for (i = 0; i < rx_pkt_nb_segs; i++) {
		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
		struct rte_mempool *mpx;
		/*
		 * Use last valid pool for the segments with number
		 * exceeding the pool index.
		 */
		mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
		mpx = mbuf_pool_find(socket_id, mp_n);
		/* Handle zero as mbuf data buffer size. */
		rx_seg->length = rx_pkt_seg_lengths[i] ?
				   rx_pkt_seg_lengths[i] :
				   mbuf_data_size[mp_n];
		rx_seg->offset = i < rx_pkt_nb_offs ?
				   rx_pkt_seg_offsets[i] : 0;
		rx_seg->mp = mpx ? mpx : mp;
	rx_conf->rx_nseg = rx_pkt_nb_segs;
	rx_conf->rx_seg = rx_useg;
	/* rx_seg points at a stack array: detach it again after setup. */
	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
				     socket_id, rx_conf, NULL);
	rx_conf->rx_seg = NULL;
	rx_conf->rx_nseg = 0;
	/* Deferred-start queues begin life in the STOPPED state. */
	ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
						RTE_ETH_QUEUE_STATE_STOPPED :
						RTE_ETH_QUEUE_STATE_STARTED;
2698 alloc_xstats_display_info(portid_t pi)
2700 uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
2701 uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
2702 uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
2704 if (xstats_display_num == 0)
2707 *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
2708 if (*ids_supp == NULL)
2711 *prev_values = calloc(xstats_display_num,
2712 sizeof(**prev_values));
2713 if (*prev_values == NULL)
2714 goto fail_prev_values;
2716 *curr_values = calloc(xstats_display_num,
2717 sizeof(**curr_values));
2718 if (*curr_values == NULL)
2719 goto fail_curr_values;
2721 ports[pi].xstats_info.allocated = true;
2734 free_xstats_display_info(portid_t pi)
2736 if (!ports[pi].xstats_info.allocated)
2738 free(ports[pi].xstats_info.ids_supp);
2739 free(ports[pi].xstats_info.prev_values);
2740 free(ports[pi].xstats_info.curr_values);
2741 ports[pi].xstats_info.allocated = false;
/** Fill helper structures for specified port to show extended statistics. */
/*
 * Resolve each user-selected xstat name to its id on port @pi and store
 * the supported ids in xstats_info.ids_supp; unknown names are skipped
 * with a warning.  RTE_PORT_ALL recurses over every port.
 * NOTE(review): this capture is elided; missing lines are not
 * reconstructed here.
 */
fill_xstats_display_info_for_port(portid_t pi)
	unsigned int stat, stat_supp;
	const char *xstat_name;
	struct rte_port *port;

	if (xstats_display_num == 0)

	if (pi == (portid_t)RTE_PORT_ALL) {
		fill_xstats_display_info();

	/* Only started ports can resolve xstat ids. */
	if (port->port_status != RTE_PORT_STARTED)

	if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0)
		rte_exit(EXIT_FAILURE,
			 "Failed to allocate xstats display memory\n");

	ids_supp = port->xstats_info.ids_supp;
	for (stat = stat_supp = 0; stat < xstats_display_num; stat++) {
		xstat_name = xstats_display[stat].name;
		rc = rte_eth_xstats_get_id_by_name(pi, xstat_name,
						   ids_supp + stat_supp);
			/* Name not known by this port: warn and skip. */
			fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n",
				xstat_name, pi, stat);

	/* Number of ids actually resolved (<= xstats_display_num). */
	port->xstats_info.ids_supp_sz = stat_supp;
2786 /** Fill helper structures for all ports to show extended statistics.
 * Thin iterator over fill_xstats_display_info_for_port(); no-op when no
 * xstats are configured for display. */
2788 fill_xstats_display_info(void)
2792 if (xstats_display_num == 0)
2795 RTE_ETH_FOREACH_DEV(pi)
2796 fill_xstats_display_info_for_port(pi);
/*
 * Start one port (@pid) or all ports (RTE_PORT_ALL): (re)configure the
 * device if flagged, (re)set up Tx/Rx/hairpin queues, start the device,
 * then optionally check link status and bind hairpin peers.
 * On any failure the port is moved back to RTE_PORT_STOPPED and the
 * relevant need_reconfig* flag is re-armed for the next attempt.
 * NOTE(review): this dump elides many lines (continue/return statements,
 * closing braces, some conditions) — comments below describe only what the
 * visible lines establish.
 */
2800 start_port(portid_t pid)
2802 int diag, need_check_link_status = -1;
2804 portid_t p_pi = RTE_MAX_ETHPORTS;
2805 portid_t pl[RTE_MAX_ETHPORTS];
2806 portid_t peer_pl[RTE_MAX_ETHPORTS];
2807 uint16_t cnt_pi = 0;
2808 uint16_t cfg_pi = 0;
2811 struct rte_port *port;
2812 struct rte_eth_hairpin_cap cap;
2814 if (port_id_is_invalid(pid, ENABLED_WARN))
2817 RTE_ETH_FOREACH_DEV(pi) {
2818 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
/* Bonding slaves are started via their bonded device, not directly. */
2821 if (port_is_bonding_slave(pi)) {
2823 "Please remove port %d from bonded device.\n",
2828 need_check_link_status = 0;
/* Claim the port: STOPPED -> HANDLING while we reconfigure/start it. */
2830 if (port->port_status == RTE_PORT_STOPPED)
2831 port->port_status = RTE_PORT_HANDLING;
2833 fprintf(stderr, "Port %d is now not stopped\n", pi);
2837 if (port->need_reconfig > 0) {
2838 struct rte_eth_conf dev_conf;
2841 port->need_reconfig = 0;
2843 if (flow_isolate_all) {
2844 int ret = port_flow_isolate(pi, 1);
2847 "Failed to apply isolated mode on port %d\n",
2852 configure_rxtx_dump_callbacks(0);
2853 printf("Configuring Port %d (socket %u)\n", pi,
2855 if (nb_hairpinq > 0 &&
2856 rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2858 "Port %d doesn't support hairpin queues\n",
2863 /* configure port */
2864 diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
2865 nb_txq + nb_hairpinq,
2868 if (port->port_status == RTE_PORT_HANDLING)
2869 port->port_status = RTE_PORT_STOPPED;
2872 "Port %d can not be set back to stopped\n",
2874 fprintf(stderr, "Fail to configure port %d\n",
2876 /* try to reconfigure port next time */
2877 port->need_reconfig = 1;
2880 /* get device configuration*/
2882 eth_dev_conf_get_print_err(pi, &dev_conf)) {
2884 "port %d can not get device configuration\n",
2888 /* Apply Rx offloads configuration */
2889 if (dev_conf.rxmode.offloads !=
2890 port->dev_conf.rxmode.offloads) {
/* Fold PMD-adjusted port offloads back into our cached config
 * and into every per-queue Rx config. */
2891 port->dev_conf.rxmode.offloads |=
2892 dev_conf.rxmode.offloads;
2894 k < port->dev_info.max_rx_queues;
2896 port->rxq[k].conf.offloads |=
2897 dev_conf.rxmode.offloads;
2899 /* Apply Tx offloads configuration */
2900 if (dev_conf.txmode.offloads !=
2901 port->dev_conf.txmode.offloads) {
2902 port->dev_conf.txmode.offloads |=
2903 dev_conf.txmode.offloads;
2905 k < port->dev_info.max_tx_queues;
2907 port->txq[k].conf.offloads |=
2908 dev_conf.txmode.offloads;
/* Queue setup is done only by the primary process. */
2911 if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2912 port->need_reconfig_queues = 0;
2913 /* setup tx queues */
2914 for (qi = 0; qi < nb_txq; qi++) {
2915 struct rte_eth_txconf *conf =
2916 &port->txq[qi].conf;
/* Prefer the user-specified Tx ring NUMA node when set. */
2918 if ((numa_support) &&
2919 (txring_numa[pi] != NUMA_NO_CONFIG))
2920 diag = rte_eth_tx_queue_setup(pi, qi,
2921 port->nb_tx_desc[qi],
2923 &(port->txq[qi].conf));
2925 diag = rte_eth_tx_queue_setup(pi, qi,
2926 port->nb_tx_desc[qi],
2928 &(port->txq[qi].conf));
2931 port->txq[qi].state =
2932 conf->tx_deferred_start ?
2933 RTE_ETH_QUEUE_STATE_STOPPED :
2934 RTE_ETH_QUEUE_STATE_STARTED;
2938 /* Fail to setup tx queue, return */
2939 if (port->port_status == RTE_PORT_HANDLING)
2940 port->port_status = RTE_PORT_STOPPED;
2943 "Port %d can not be set back to stopped\n",
2946 "Fail to configure port %d tx queues\n",
2948 /* try to reconfigure queues next time */
2949 port->need_reconfig_queues = 1;
2952 for (qi = 0; qi < nb_rxq; qi++) {
2953 /* setup rx queues */
2954 if ((numa_support) &&
2955 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
/* Mempool must exist on the requested NUMA socket. */
2956 struct rte_mempool * mp =
2958 (rxring_numa[pi], 0);
2961 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2966 diag = rx_queue_setup(pi, qi,
2967 port->nb_rx_desc[qi],
2969 &(port->rxq[qi].conf),
2972 struct rte_mempool *mp =
2974 (port->socket_id, 0);
2977 "Failed to setup RX queue: No mempool allocation on the socket %d\n",
2981 diag = rx_queue_setup(pi, qi,
2982 port->nb_rx_desc[qi],
2984 &(port->rxq[qi].conf),
2990 /* Fail to setup rx queue, return */
2991 if (port->port_status == RTE_PORT_HANDLING)
2992 port->port_status = RTE_PORT_STOPPED;
2995 "Port %d can not be set back to stopped\n",
2998 "Fail to configure port %d rx queues\n",
3000 /* try to reconfigure queues next time */
3001 port->need_reconfig_queues = 1;
3004 /* setup hairpin queues */
3005 if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
3008 configure_rxtx_dump_callbacks(verbose_level);
/* Disable ptype parsing; failure is reported but not fatal. */
3010 diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
3014 "Port %d: Failed to disable Ptype parsing\n",
3022 diag = eth_dev_start_mp(pi);
3024 fprintf(stderr, "Fail to start port %d: %s\n",
3025 pi, rte_strerror(-diag));
3027 /* Fail to setup rx queue, return */
3028 if (port->port_status == RTE_PORT_HANDLING)
3029 port->port_status = RTE_PORT_STOPPED;
3032 "Port %d can not be set back to stopped\n",
/* Device started: HANDLING -> STARTED. */
3037 if (port->port_status == RTE_PORT_HANDLING)
3038 port->port_status = RTE_PORT_STARTED;
3040 fprintf(stderr, "Port %d can not be set into started\n",
3043 if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
3044 printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
3045 RTE_ETHER_ADDR_BYTES(&port->eth_addr));
3047 /* at least one port started, need checking link status */
3048 need_check_link_status = 1;
3053 if (need_check_link_status == 1 && !no_link_check)
3054 check_all_ports_link_status(RTE_PORT_ALL);
3055 else if (need_check_link_status == 0)
3056 fprintf(stderr, "Please stop the ports first\n");
/* Hairpin binding phase: only when a bind/unbind mode bit is set. */
3058 if (hairpin_mode & 0xf) {
3062 /* bind all started hairpin ports */
3063 for (i = 0; i < cfg_pi; i++) {
3065 /* bind current Tx to all peer Rx */
3066 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3067 RTE_MAX_ETHPORTS, 1);
3070 for (j = 0; j < peer_pi; j++) {
3071 if (!port_is_started(peer_pl[j]))
3073 diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
3076 "Error during binding hairpin Tx port %u to %u: %s\n",
3078 rte_strerror(-diag));
3082 /* bind all peer Tx to current Rx */
3083 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3084 RTE_MAX_ETHPORTS, 0);
3087 for (j = 0; j < peer_pi; j++) {
3088 if (!port_is_started(peer_pl[j]))
3090 diag = rte_eth_hairpin_bind(peer_pl[j], pi);
3093 "Error during binding hairpin Tx port %u to %u: %s\n",
3095 rte_strerror(-diag));
/* Refresh xstats display ids now that the port(s) are started. */
3102 fill_xstats_display_info_for_port(pid);
/*
 * Stop one port (@pid) or all ports (RTE_PORT_ALL): refuse ports still in
 * the forwarding config or owned by a bonded device, unbind hairpin peers,
 * flush flow rules, stop the device and mark it RTE_PORT_STOPPED.
 * NOTE(review): lines are elided in this dump (continue paths, closing
 * braces are not visible).
 */
3109 stop_port(portid_t pid)
3112 struct rte_port *port;
3113 int need_check_link_status = 0;
3114 portid_t peer_pl[RTE_MAX_ETHPORTS];
3117 if (port_id_is_invalid(pid, ENABLED_WARN))
3120 printf("Stopping ports...\n");
3122 RTE_ETH_FOREACH_DEV(pi) {
3123 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
/* Cannot stop a port that is actively forwarding traffic. */
3126 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3128 "Please remove port %d from forwarding configuration.\n",
3133 if (port_is_bonding_slave(pi)) {
3135 "Please remove port %d from bonded device.\n",
/* Claim the port: STARTED -> HANDLING while stopping. */
3141 if (port->port_status == RTE_PORT_STARTED)
3142 port->port_status = RTE_PORT_HANDLING;
3146 if (hairpin_mode & 0xf) {
/* Unbind this port's Tx from all peers, then each peer's Tx from us. */
3149 rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
3150 /* unbind all peer Tx from current Rx */
3151 peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
3152 RTE_MAX_ETHPORTS, 0);
3155 for (j = 0; j < peer_pi; j++) {
3156 if (!port_is_started(peer_pl[j]))
3158 rte_eth_hairpin_unbind(peer_pl[j], pi);
3162 if (port->flow_list)
3163 port_flow_flush(pi);
3165 if (eth_dev_stop_mp(pi) != 0)
3166 RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
3169 if (port->port_status == RTE_PORT_HANDLING)
3170 port->port_status = RTE_PORT_STOPPED;
3172 fprintf(stderr, "Port %d can not be set into stopped\n",
3174 need_check_link_status = 1;
3176 if (need_check_link_status && !no_link_check)
3177 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Compact @array in place, dropping invalid port ids; writes the new count
 * back through @total.  NOTE(review): the increment of new_total and the
 * final `*total = new_total;` are elided from this dump.
 */
3183 remove_invalid_ports_in(portid_t *array, portid_t *total)
3186 portid_t new_total = 0;
3188 for (i = 0; i < *total; i++)
3189 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
3190 array[new_total] = array[i];
/*
 * Drop detached/invalid ids from both the probed-ports and forwarding-ports
 * arrays, then keep nb_cfg_ports consistent with nb_fwd_ports.
 */
3197 remove_invalid_ports(void)
3199 remove_invalid_ports_in(ports_ids, &nb_ports);
3200 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
3201 nb_cfg_ports = nb_fwd_ports;
/*
 * Close one port (@pid) or all ports (RTE_PORT_ALL).  Refuses forwarding
 * ports and bonding slaves; in the primary process flushes flows, flex
 * items and indirect actions before rte_eth_dev_close().  Finally frees
 * xstats display memory and prunes the now-invalid ids from the port lists.
 */
3205 close_port(portid_t pid)
3208 struct rte_port *port;
3210 if (port_id_is_invalid(pid, ENABLED_WARN))
3213 printf("Closing ports...\n");
3215 RTE_ETH_FOREACH_DEV(pi) {
3216 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3219 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3221 "Please remove port %d from forwarding configuration.\n",
3226 if (port_is_bonding_slave(pi)) {
3228 "Please remove port %d from bonded device.\n",
3234 if (port->port_status == RTE_PORT_CLOSED) {
3235 fprintf(stderr, "Port %d is already closed\n", pi);
/* Only the primary process may release device resources. */
3239 if (is_proc_primary()) {
3240 port_flow_flush(pi);
3241 port_flex_item_flush(pi);
3242 port_action_handle_flush(pi);
3243 rte_eth_dev_close(pi);
3246 free_xstats_display_info(pi);
3249 remove_invalid_ports();
/*
 * Hardware-reset one port or all ports.  All targeted ports must already be
 * stopped; on success the port is flagged for full reconfiguration
 * (need_reconfig / need_reconfig_queues) before its next start.
 */
3254 reset_port(portid_t pid)
3258 struct rte_port *port;
3260 if (port_id_is_invalid(pid, ENABLED_WARN))
/* Reset requires the target port(s) to be stopped first. */
3263 if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
3264 (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
3266 "Can not reset port(s), please stop port(s) first.\n");
3270 printf("Resetting ports...\n");
3272 RTE_ETH_FOREACH_DEV(pi) {
3273 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
3276 if (port_is_forwarding(pi) != 0 && test_done == 0) {
3278 "Please remove port %d from forwarding configuration.\n",
3283 if (port_is_bonding_slave(pi)) {
3285 "Please remove port %d from bonded device.\n",
3290 diag = rte_eth_dev_reset(pi);
3293 port->need_reconfig = 1;
3294 port->need_reconfig_queues = 1;
3296 fprintf(stderr, "Failed to reset port %d. diag=%d\n",
/*
 * Hot-plug a device by devargs @identifier.  Two attach modes: when
 * setup_on_probe_event is set, ports flagged by the RTE_ETH_EVENT_NEW
 * handler are set up here; otherwise every port matching the identifier is
 * iterated and set up directly.
 */
3305 attach_port(char *identifier)
3308 struct rte_dev_iterator iterator;
3310 printf("Attaching a new port...\n");
3312 if (identifier == NULL) {
3313 fprintf(stderr, "Invalid parameters are specified\n");
3317 if (rte_dev_probe(identifier) < 0) {
3318 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
3322 /* first attach mode: event */
3323 if (setup_on_probe_event) {
3324 /* new ports are detected on RTE_ETH_EVENT_NEW event */
3325 for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
3326 if (ports[pi].port_status == RTE_PORT_HANDLING &&
3327 ports[pi].need_setup != 0)
3328 setup_attached_port(pi);
3332 /* second attach mode: iterator */
3333 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
3334 /* setup ports matching the devargs used for probing */
3335 if (port_is_forwarding(pi))
3336 continue; /* port was already attached before */
3337 setup_attached_port(pi);
/*
 * Finish bringing a newly attached port into testpmd: pick a valid NUMA
 * socket, reconfigure, enable promiscuous mode (best effort), append the
 * port to the probed and forwarding lists and mark it STOPPED.
 */
3342 setup_attached_port(portid_t pi)
3344 unsigned int socket_id;
3347 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
3348 /* if socket_id is invalid, set to the first available socket. */
3349 if (check_socket_id(socket_id) < 0)
3350 socket_id = socket_ids[0];
3351 reconfig(pi, socket_id);
3352 ret = rte_eth_promiscuous_enable(pi);
/* Promiscuous failure is logged and ignored, not fatal. */
3355 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
3356 pi, rte_strerror(-ret));
3358 ports_ids[nb_ports++] = pi;
3359 fwd_ports_ids[nb_fwd_ports++] = pi;
3360 nb_cfg_ports = nb_fwd_ports;
3361 ports[pi].need_setup = 0;
3362 ports[pi].port_status = RTE_PORT_STOPPED;
3364 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Detach an rte_device and all of its sibling ethdev ports.  Every sibling
 * must be stopped (or already closed); flow rules are flushed before
 * rte_dev_remove().  Stale port ids are pruned afterwards.
 * NOTE(review): the NULL-device guard around the "Device already removed"
 * message is elided from this dump.
 */
3369 detach_device(struct rte_device *dev)
3374 fprintf(stderr, "Device already removed\n");
3378 printf("Removing a device...\n");
3380 RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3381 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3382 if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3383 fprintf(stderr, "Port %u not stopped\n",
3387 port_flow_flush(sibling);
3391 if (rte_dev_remove(dev) < 0) {
3392 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3395 remove_invalid_ports();
3397 printf("Device is detached\n");
3398 printf("Now total ports is %d\n", nb_ports);
/*
 * Detach the device backing @port_id.  The port must be stopped; the device
 * handle is looked up via dev_info and passed to detach_device().
 */
3404 detach_port_device(portid_t port_id)
3407 struct rte_eth_dev_info dev_info;
3409 if (port_id_is_invalid(port_id, ENABLED_WARN))
3412 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3413 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3414 fprintf(stderr, "Port not stopped\n");
3417 fprintf(stderr, "Port was not closed\n");
3420 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3423 "Failed to get device info for port %d, not detaching\n",
3427 detach_device(dev_info.device);
/*
 * Detach devices by devargs string: parse @identifier, verify every
 * matching port is stopped (flushing its flow rules), then hot-unplug via
 * rte_eal_hotplug_remove().  The parsed rte_devargs is reset on every exit
 * path to release its allocations.
 */
3431 detach_devargs(char *identifier)
3433 struct rte_dev_iterator iterator;
3434 struct rte_devargs da;
3437 printf("Removing a device...\n");
3439 memset(&da, 0, sizeof(da));
3440 if (rte_devargs_parsef(&da, "%s", identifier)) {
3441 fprintf(stderr, "cannot parse identifier\n");
3445 RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
3446 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3447 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3448 fprintf(stderr, "Port %u not stopped\n",
/* Abort: iterator and devargs must be released before returning. */
3450 rte_eth_iterator_cleanup(&iterator);
3451 rte_devargs_reset(&da);
3454 port_flow_flush(port_id);
3458 if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3459 TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3460 da.name, da.bus->name);
3461 rte_devargs_reset(&da);
3465 remove_invalid_ports();
3467 printf("Device %s is detached\n", identifier);
3468 printf("Now total ports is %d\n", nb_ports);
3470 rte_devargs_reset(&da);
/* NOTE(review): the enclosing function's header is elided from this dump —
 * from the visible teardown sequence this looks like testpmd's exit routine
 * (presumably pmd_test_exit); confirm against the full file.
 * Visible steps: stop forwarding, DMA-unmap anonymous mempools, stop and
 * shut down all ports, tear down hotplug monitoring, free mempools and the
 * xstats display list. */
3481 stop_packet_forwarding();
3483 #ifndef RTE_EXEC_ENV_WINDOWS
3484 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
/* Anonymous-memory mempools were DMA-mapped manually; unmap them here. */
3486 if (mp_alloc_type == MP_ALLOC_ANON)
3487 rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3492 if (ports != NULL) {
3494 RTE_ETH_FOREACH_DEV(pt_id) {
3495 printf("\nStopping port %d...\n", pt_id);
3499 RTE_ETH_FOREACH_DEV(pt_id) {
3500 printf("\nShutting down port %d...\n", pt_id);
3507 ret = rte_dev_event_monitor_stop();
3510 "fail to stop device event monitor.");
3514 ret = rte_dev_event_callback_unregister(NULL,
3515 dev_event_callback, NULL);
3518 "fail to unregister device event callback.\n");
3522 ret = rte_dev_hotplug_handle_disable();
3525 "fail to disable hotplug handling.\n");
3529 for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3531 mempool_free_mp(mempools[i]);
3533 free(xstats_display);
3535 printf("\nBye...\n");
/* A named test command: a label and the handler invoked for it. */
3538 typedef void (*cmd_func_t)(void);
3539 struct pmd_test_command {
3540 const char *cmd_name;
3541 cmd_func_t cmd_func;
3544 /* Check the link status of all ports in up to 9s, and print them finally.
 * Polls rte_eth_link_get_nowait() for every port in @port_mask every
 * CHECK_INTERVAL ms; once all links are up (or the timeout is reached) a
 * final pass prints each port's status string. */
3546 check_all_ports_link_status(uint32_t port_mask)
3548 #define CHECK_INTERVAL 100 /* 100ms */
3549 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3551 uint8_t count, all_ports_up, print_flag = 0;
3552 struct rte_eth_link link;
3554 char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3556 printf("Checking link statuses...\n");
3558 for (count = 0; count <= MAX_CHECK_TIME; count++) {
3560 RTE_ETH_FOREACH_DEV(portid) {
3561 if ((port_mask & (1 << portid)) == 0)
3563 memset(&link, 0, sizeof(link));
3564 ret = rte_eth_link_get_nowait(portid, &link);
3567 if (print_flag == 1)
3569 "Port %u link get failed: %s\n",
3570 portid, rte_strerror(-ret));
3573 /* print link status if flag set */
3574 if (print_flag == 1) {
3575 rte_eth_link_to_str(link_status,
3576 sizeof(link_status), &link);
3577 printf("Port %d %s\n", portid, link_status);
3580 /* clear all_ports_up flag if any link down */
3581 if (link.link_status == RTE_ETH_LINK_DOWN) {
3586 /* after finally printing all link status, get out */
3587 if (print_flag == 1)
/* Not all links up yet: wait one interval and poll again. */
3590 if (all_ports_up == 0) {
3592 rte_delay_ms(CHECK_INTERVAL);
3595 /* set the print_flag if all ports up or timeout */
3596 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred-removal alarm handler (scheduled by the RMV/device-remove event
 * handlers).  Stops forwarding if the removed port was in use, closes the
 * port, detaches its device, and restarts forwarding if it had been
 * stopped here.  Link checking is suppressed during the teardown and
 * restored afterwards.
 */
3606 rmv_port_callback(void *arg)
3608 int need_to_start = 0;
3609 int org_no_link_check = no_link_check;
/* Port id is smuggled through the alarm's void* argument. */
3610 portid_t port_id = (intptr_t)arg;
3611 struct rte_eth_dev_info dev_info;
3614 RTE_ETH_VALID_PORTID_OR_RET(port_id);
3616 if (!test_done && port_is_forwarding(port_id)) {
3618 stop_packet_forwarding();
3622 no_link_check = org_no_link_check;
3624 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3627 "Failed to get device info for port %d, not detaching\n",
/* Save the device handle before close_port() invalidates the port. */
3630 struct rte_device *device = dev_info.device;
3631 close_port(port_id);
3632 detach_device(device); /* might be already removed or have more ports */
3635 start_packet_forwarding(0);
3638 /* This function is used by the interrupt thread.
 * Generic ethdev event handler registered for all event types: optionally
 * logs the event (event_print_mask), then handles NEW (flag for deferred
 * setup), INTR_RMV (schedule deferred removal via alarm) and DESTROY
 * (mark the port closed). */
3640 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3643 RTE_SET_USED(param);
3644 RTE_SET_USED(ret_param);
3646 if (type >= RTE_ETH_EVENT_MAX) {
3648 "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3649 port_id, __func__, type);
3651 } else if (event_print_mask & (UINT32_C(1) << type)) {
3652 printf("\nPort %" PRIu16 ": %s event\n", port_id,
3653 eth_event_desc[type]);
3658 case RTE_ETH_EVENT_NEW:
3659 ports[port_id].need_setup = 1;
3660 ports[port_id].port_status = RTE_PORT_HANDLING;
3662 case RTE_ETH_EVENT_INTR_RMV:
3663 if (port_id_is_invalid(port_id, DISABLED_WARN))
/* Removal must not run in interrupt context: defer it by 100ms alarm. */
3665 if (rte_eal_alarm_set(100000,
3666 rmv_port_callback, (void *)(intptr_t)port_id))
3668 "Could not set up deferred device removal\n");
3670 case RTE_ETH_EVENT_DESTROY:
3671 ports[port_id].port_status = RTE_PORT_CLOSED;
3672 printf("Port %u is closed\n", port_id);
/*
 * Register eth_event_callback for every ethdev event type on all ports.
 * Logs and (per the elided lines) propagates the first registration error.
 */
3681 register_eth_event_callback(void)
3684 enum rte_eth_event_type event;
3686 for (event = RTE_ETH_EVENT_UNKNOWN;
3687 event < RTE_ETH_EVENT_MAX; event++) {
3688 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3693 TESTPMD_LOG(ERR, "Failed to register callback for "
3694 "%s event\n", eth_event_desc[event]);
3702 /* This function is used by the interrupt thread.
 * EAL device (bus-level) event handler: on REMOVE, resolves the port id
 * from the device name and schedules the same deferred removal used by the
 * ethdev RMV event; ADD is logged only. */
3704 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3705 __rte_unused void *arg)
3710 if (type >= RTE_DEV_EVENT_MAX) {
3711 fprintf(stderr, "%s called upon invalid event %d\n",
3717 case RTE_DEV_EVENT_REMOVE:
3718 RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3720 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3722 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
3727 * Because the user's callback is invoked in eal interrupt
3728 * callback, the interrupt callback need to be finished before
3729 * it can be unregistered when detaching device. So finish
3730 * callback soon and use a deferred removal to detach device
3731 * is need. It is a workaround, once the device detaching be
3732 * moved into the eal in the future, the deferred removal could
3735 if (rte_eal_alarm_set(100000,
3736 rmv_port_callback, (void *)(intptr_t)port_id))
3738 "Could not set up deferred device removal\n");
3740 case RTE_DEV_EVENT_ADD:
3741 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3743 /* TODO: After finish kernel driver binding,
3744 * begin to attach port.
/*
 * Initialize every Rx/Tx queue config of port @pid from the PMD's defaults,
 * preserving previously accumulated per-queue offloads, then overlay any
 * thresholds/parameters the user passed on the command line
 * (RTE_PMD_PARAM_UNSET means "not given").  Also records the descriptor
 * counts (nb_rxd/nb_txd) per queue.
 */
3753 rxtx_port_config(portid_t pid)
3757 struct rte_port *port = &ports[pid];
3759 for (qid = 0; qid < nb_rxq; qid++) {
/* Keep offloads across the reset to the PMD default rxconf. */
3760 offloads = port->rxq[qid].conf.offloads;
3761 port->rxq[qid].conf = port->dev_info.default_rxconf;
3763 if (rxq_share > 0 &&
3764 (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
3765 /* Non-zero share group to enable RxQ share. */
3766 port->rxq[qid].conf.share_group = pid / rxq_share + 1;
3767 port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
3771 port->rxq[qid].conf.offloads = offloads;
3773 /* Check if any Rx parameters have been passed */
3774 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3775 port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh;
3777 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3778 port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh;
3780 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3781 port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh;
3783 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3784 port->rxq[qid].conf.rx_free_thresh = rx_free_thresh;
3786 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3787 port->rxq[qid].conf.rx_drop_en = rx_drop_en;
3789 port->nb_rx_desc[qid] = nb_rxd;
3792 for (qid = 0; qid < nb_txq; qid++) {
3793 offloads = port->txq[qid].conf.offloads;
3794 port->txq[qid].conf = port->dev_info.default_txconf;
3796 port->txq[qid].conf.offloads = offloads;
3798 /* Check if any Tx parameters have been passed */
3799 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3800 port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh;
3802 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3803 port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh;
3805 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3806 port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh;
3808 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3809 port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh;
3811 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3812 port->txq[qid].conf.tx_free_thresh = tx_free_thresh;
3814 port->nb_tx_desc[qid] = nb_txd;
3819 * Helper function to set MTU from frame size
3821 * port->dev_info should be set before calling this function.
3823 * return 0 on success, negative on error
3826 update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
3828 struct rte_port *port = &ports[portid];
3829 uint32_t eth_overhead;
3830 uint16_t mtu, new_mtu;
/* MTU = max frame length minus L2 header/VLAN/CRC overhead. */
3832 eth_overhead = get_eth_overhead(&port->dev_info);
3834 if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
3835 printf("Failed to get MTU for port %u\n", portid);
3839 new_mtu = max_rx_pktlen - eth_overhead;
3844 if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
3846 "Failed to set MTU to %u for port %u\n",
/* Keep the cached device configuration in sync with the applied MTU. */
3851 port->dev_conf.rxmode.mtu = new_mtu;
/*
 * Build the initial rte_eth_conf for every probed port: fdir config, RSS
 * hash functions masked by what the device supports, multi-queue Rx mode,
 * per-queue configs (via rxtx_port_config) and LSC/RMV interrupt flags
 * when both requested and supported by the device.
 * NOTE(review): the condition selecting between the two rss_conf branches
 * (around original lines 3868-3875) is elided from this dump.
 */
3857 init_port_config(void)
3860 struct rte_port *port;
3863 RTE_ETH_FOREACH_DEV(pid) {
3865 port->dev_conf.fdir_conf = fdir_conf;
3867 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
/* RSS hash mask limited to the functions this device can offload. */
3872 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3873 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3874 rss_hf & port->dev_info.flow_type_rss_offloads;
3876 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3877 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3880 if (port->dcb_flag == 0) {
3881 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
3882 port->dev_conf.rxmode.mq_mode =
3883 (enum rte_eth_rx_mq_mode)
3884 (rx_mq_mode & RTE_ETH_MQ_RX_RSS);
/* No RSS: plain single-queue mode and drop the RSS_HASH offload. */
3886 port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
3887 port->dev_conf.rxmode.offloads &=
3888 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3891 i < port->dev_info.nb_rx_queues;
3893 port->rxq[i].conf.offloads &=
3894 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
3898 rxtx_port_config(pid);
3900 ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3904 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3905 rte_pmd_ixgbe_bypass_init(pid);
/* Enable link-state / removal interrupts only if the device supports them. */
3908 if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
3909 port->dev_conf.intr_conf.lsc = 1;
3910 if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3911 port->dev_conf.intr_conf.rmv = 1;
/* Mark @slave_pid as a member of a bonded device. */
3915 void set_port_slave_flag(portid_t slave_pid)
3917 struct rte_port *port;
3919 port = &ports[slave_pid];
3920 port->slave_flag = 1;
/* Clear the bonding-member mark from @slave_pid. */
3923 void clear_port_slave_flag(portid_t slave_pid)
3925 struct rte_port *port;
3927 port = &ports[slave_pid];
3928 port->slave_flag = 0;
/*
 * Return non-zero if @slave_pid belongs to a bonded device, judged either
 * by the device's BONDED_SLAVE flag or by testpmd's own slave_flag mark.
 */
3931 uint8_t port_is_bonding_slave(portid_t slave_pid)
3933 struct rte_port *port;
3934 struct rte_eth_dev_info dev_info;
3937 port = &ports[slave_pid];
3938 ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
3941 "Failed to get device info for port id %d,"
3942 "cannot determine if the port is a bonded slave",
3946 if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
/* VLAN ids used to populate the VMDQ+DCB pool map (one per pool map entry). */
3951 const uint16_t vlan_tags[] = {
3952 0, 1, 2, 3, 4, 5, 6, 7,
3953 8, 9, 10, 11, 12, 13, 14, 15,
3954 16, 17, 18, 19, 20, 21, 22, 23,
3955 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Populate @eth_conf for DCB operation.  Two variants:
 *  - DCB_VT_ENABLED: VMDQ+DCB on both Rx and Tx, with pool count derived
 *    from num_tcs (4 TCs -> 32 pools, otherwise 16) and a VLAN->pool map
 *    built from the vlan_tags table;
 *  - otherwise: plain DCB(+RSS) using the device's current RSS hash config.
 * Finally sets dcb_capability_en, including PFC support when @pfc_en is
 * set (the pfc_en condition itself is elided from this dump).
 */
3959 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3960 enum dcb_mode_enable dcb_mode,
3961 enum rte_eth_nb_tcs num_tcs,
3966 struct rte_eth_rss_conf rss_conf;
3969 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3970 * given above, and the number of traffic classes available for use.
3972 if (dcb_mode == DCB_VT_ENABLED) {
3973 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3974 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
3975 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3976 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3978 /* VMDQ+DCB RX and TX configurations */
3979 vmdq_rx_conf->enable_default_pool = 0;
3980 vmdq_rx_conf->default_pool = 0;
3981 vmdq_rx_conf->nb_queue_pools =
3982 (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
3983 vmdq_tx_conf->nb_queue_pools =
3984 (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
3986 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3987 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3988 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
/* Round-robin VLANs over the pools. */
3989 vmdq_rx_conf->pool_map[i].pools =
3990 1 << (i % vmdq_rx_conf->nb_queue_pools);
3992 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3993 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3994 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3997 /* set DCB mode of RX and TX of multiple queues */
3998 eth_conf->rxmode.mq_mode =
3999 (enum rte_eth_rx_mq_mode)
4000 (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
4001 eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
4003 struct rte_eth_dcb_rx_conf *rx_conf =
4004 &eth_conf->rx_adv_conf.dcb_rx_conf;
4005 struct rte_eth_dcb_tx_conf *tx_conf =
4006 &eth_conf->tx_adv_conf.dcb_tx_conf;
/* Reuse the port's current RSS hash configuration for DCB+RSS. */
4008 memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
4010 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
4014 rx_conf->nb_tcs = num_tcs;
4015 tx_conf->nb_tcs = num_tcs;
4017 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
4018 rx_conf->dcb_tc[i] = i % num_tcs;
4019 tx_conf->dcb_tc[i] = i % num_tcs;
4022 eth_conf->rxmode.mq_mode =
4023 (enum rte_eth_rx_mq_mode)
4024 (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
4025 eth_conf->rx_adv_conf.rss_conf = rss_conf;
4026 eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
4030 eth_conf->dcb_capability_en =
4031 RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
4033 eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
/*
 * Switch port @pid into DCB mode: build a DCB rte_eth_conf (via
 * get_eth_dcb_conf), reconfigure the device, then derive nb_rxq/nb_txq from
 * the device's pool/TC layout, re-run per-queue config and install the VLAN
 * filters for all vlan_tags.  Not supported with multiple processes.
 * NOTE(review): several error-return lines and the final dcb_config
 * assignment are elided from this dump.
 */
4039 init_port_dcb_config(portid_t pid,
4040 enum dcb_mode_enable dcb_mode,
4041 enum rte_eth_nb_tcs num_tcs,
4044 struct rte_eth_conf port_conf;
4045 struct rte_port *rte_port;
4049 if (num_procs > 1) {
4050 printf("The multi-process feature doesn't support dcb.\n");
4053 rte_port = &ports[pid];
4055 /* retain the original device configuration. */
4056 memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
4058 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
4059 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
4062 port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4063 /* remove RSS HASH offload for DCB in vt mode */
4064 if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
4065 port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4066 for (i = 0; i < nb_rxq; i++)
4067 rte_port->rxq[i].conf.offloads &=
4068 ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
4071 /* re-configure the device . */
4072 retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
4076 retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
4080 /* If dev_info.vmdq_pool_base is greater than 0,
4081 * the queue id of vmdq pools is started after pf queues.
4083 if (dcb_mode == DCB_VT_ENABLED &&
4084 rte_port->dev_info.vmdq_pool_base > 0) {
4086 "VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
4091 /* Assume the ports in testpmd have the same dcb capability
4092 * and has the same number of rxq and txq in dcb mode
4094 if (dcb_mode == DCB_VT_ENABLED) {
4095 if (rte_port->dev_info.max_vfs > 0) {
4096 nb_rxq = rte_port->dev_info.nb_rx_queues;
4097 nb_txq = rte_port->dev_info.nb_tx_queues;
4099 nb_rxq = rte_port->dev_info.max_rx_queues;
4100 nb_txq = rte_port->dev_info.max_tx_queues;
4103 /*if vt is disabled, use all pf queues */
4104 if (rte_port->dev_info.vmdq_pool_base == 0) {
4105 nb_rxq = rte_port->dev_info.max_rx_queues;
4106 nb_txq = rte_port->dev_info.max_tx_queues;
4108 nb_rxq = (queueid_t)num_tcs;
4109 nb_txq = (queueid_t)num_tcs;
4113 rx_free_thresh = 64;
/* Commit the DCB config as the port's cached device configuration. */
4115 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
4117 rxtx_port_config(pid);
4119 rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4120 for (i = 0; i < RTE_DIM(vlan_tags); i++)
4121 rx_vft_set(pid, vlan_tags[i], 1);
4123 retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
4127 rte_port->dcb_flag = 1;
4129 /* Enter DCB configuration status */
/* NOTE(review): the enclosing function's header is elided from this dump —
 * from the visible body this is the port-array initialization routine
 * (presumably init_port); confirm against the full file.  Allocates the
 * global ports[] array and resets per-port xstats/tunnel/NUMA state. */
4140 /* Configuration of Ethernet ports. */
4141 ports = rte_zmalloc("testpmd: ports",
4142 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
4143 RTE_CACHE_LINE_SIZE);
4144 if (ports == NULL) {
4145 rte_exit(EXIT_FAILURE,
4146 "rte_zmalloc(%d struct rte_port) failed\n",
4149 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4150 ports[i].xstats_info.allocated = false;
4151 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4152 LIST_INIT(&ports[i].flow_tunnel_list);
4153 /* Initialize ports NUMA structures */
4154 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4155 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
4156 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
/* NOTE(review): the enclosing function's header is elided from this dump —
 * this is the periodic stats display body (presumably print_stats):
 * clears the terminal via ANSI escape sequences, then prints NIC stats for
 * every forwarding port. */
4170 const char clr[] = { 27, '[', '2', 'J', '\0' };
4171 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
4173 /* Clear screen and move to top left */
4174 printf("%s%s", clr, top_left);
4176 printf("\nPort statistics ====================================");
4177 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
4178 nic_stats_display(fwd_ports_ids[i]);
/*
 * SIGINT/SIGTERM handler: uninitialize pdump and latency stats, flag the
 * main loop to terminate, then (on non-Windows) restore the default handler
 * and re-raise the signal so the process exits with the conventional status.
 */
4184 signal_handler(int signum)
4186 if (signum == SIGINT || signum == SIGTERM) {
4187 fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
4189 #ifdef RTE_LIB_PDUMP
4190 /* uninitialize packet capture framework */
4193 #ifdef RTE_LIB_LATENCYSTATS
4194 if (latencystats_enabled != 0)
4195 rte_latencystats_uninit();
4198 /* Set flag to indicate the force termination. */
4200 /* exit with the expected status */
4201 #ifndef RTE_EXEC_ENV_WINDOWS
4202 signal(signum, SIG_DFL);
4203 kill(getpid(), signum);
/*
 * main() - testpmd application entry point.
 *
 * Overall flow visible here: install signal handlers, register the testpmd
 * log type, initialize the DPDK EAL, enumerate probed ethdev ports, parse
 * testpmd-specific command-line options, optionally start ports and enable
 * promiscuous mode, set up optional stats subsystems (metrics / latency /
 * bitrate), then either enter the interactive command line or start packet
 * forwarding and loop until a quit signal, finally tearing down the EAL.
 *
 * NOTE(review): this view of the file is non-contiguous — many original
 * lines (declarations, if-bodies, closing braces, #endif directives) are
 * elided. Comments below describe only what the visible lines establish.
 */
4209 main(int argc, char** argv)
/* Route SIGINT/SIGTERM through signal_handler() for an orderly shutdown. */
4216 signal(SIGINT, signal_handler);
4217 signal(SIGTERM, signal_handler);
/* Register a dedicated log type for testpmd messages; fatal if it fails. */
4219 testpmd_logtype = rte_log_register("testpmd");
4220 if (testpmd_logtype < 0)
4221 rte_exit(EXIT_FAILURE, "Cannot register log type");
4222 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
/*
 * Initialize the EAL first; it consumes its own argv options.
 * NOTE(review): the failure check (presumably `if (diag < 0)`) is elided
 * here — the rte_exit below is its body.
 */
4224 diag = rte_eal_init(argc, argv);
4226 rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
4227 rte_strerror(rte_errno));
/* Subscribe to ethdev events (link state, removal, ...); fatal on error. */
4229 ret = register_eth_event_callback();
4231 rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
4233 #ifdef RTE_LIB_PDUMP
4234 /* initialize packet capture framework */
/* Record every port probed by the EAL and count them. */
4239 RTE_ETH_FOREACH_DEV(port_id) {
4240 ports_ids[count] = port_id;
4243 nb_ports = (portid_t) count;
/* Zero ports is only a warning — testpmd can still run interactively. */
4245 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
4247 /* allocate port structures, and init them */
/* Build the default forwarding config; requires at least one fwd core. */
4250 set_def_fwd_config();
4252 rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
4253 "Check the core mask argument\n");
4255 /* Bitrate/latency stats disabled by default */
4256 #ifdef RTE_LIB_BITRATESTATS
4257 bitrate_enabled = 0;
4259 #ifdef RTE_LIB_LATENCYSTATS
4260 latencystats_enabled = 0;
4263 /* on FreeBSD, mlockall() is disabled by default */
4264 #ifdef RTE_EXEC_ENV_FREEBSD
/* Parse testpmd's own options (after the EAL has consumed its share). */
4273 launch_args_parse(argc, argv);
/* Optionally lock all pages in RAM; failure is non-fatal (NOTICE only). */
4275 #ifndef RTE_EXEC_ENV_WINDOWS
4276 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
4277 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
/* Option sanity checks: --tx-first conflicts with interactive mode. */
4282 if (tx_first && interactive)
4283 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
4284 "interactive mode.\n");
/* lsc_interrupt is incompatible with tx_first; it is disabled, not fatal. */
4286 if (tx_first && lsc_interrupt) {
4288 "Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
/* Warn on degenerate queue configurations (all-zero, or RSS without TX). */
4292 if (!nb_rxq && !nb_txq)
4294 "Warning: Either rx or tx queues should be non-zero\n");
4296 if (nb_rxq > 1 && nb_rxq > nb_txq)
4298 "Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent to fully test it.\n",
/*
 * Hotplug support: enable hotplug handling, start the device event
 * monitor, and register a callback for attach/detach events.
 * NOTE(review): the guarding condition and error checks are elided in
 * this view; each rte_exit below belongs to a hidden `if (ret)` check.
 */
4304 ret = rte_dev_hotplug_handle_enable();
4307 "fail to enable hotplug handling.");
4311 ret = rte_dev_event_monitor_start();
4314 "fail to start device event monitoring.");
4318 ret = rte_dev_event_callback_register(NULL,
4319 dev_event_callback, NULL);
4322 "fail to register device event callback\n");
/* Start all ports unless suppressed by --disable-device-start. */
4327 if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
4328 rte_exit(EXIT_FAILURE, "Start ports failed\n");
4330 /* set all ports to promiscuous mode by default */
4331 RTE_ETH_FOREACH_DEV(port_id) {
4332 ret = rte_eth_promiscuous_enable(port_id);
/* Promiscuous failure is logged and ignored — the port stays usable. */
4335 "Error during enabling promiscuous mode for port %u: %s - ignore\n",
4336 port_id, rte_strerror(-ret));
4339 #ifdef RTE_LIB_METRICS
4340 /* Init metrics library */
4341 rte_metrics_init(rte_socket_id());
/* Latency stats run on a dedicated lcore when enabled via command line. */
4344 #ifdef RTE_LIB_LATENCYSTATS
4345 if (latencystats_enabled != 0) {
4346 int ret = rte_latencystats_init(1, NULL);
4349 "Warning: latencystats init() returned error %d\n",
4351 fprintf(stderr, "Latencystats running on lcore %d\n",
4352 latencystats_lcore_id);
4356 /* Setup bitrate stats */
4357 #ifdef RTE_LIB_BITRATESTATS
4358 if (bitrate_enabled != 0) {
4359 bitrate_data = rte_stats_bitrate_create();
4360 if (bitrate_data == NULL)
4361 rte_exit(EXIT_FAILURE,
4362 "Could not allocate bitrate data.\n");
4363 rte_stats_bitrate_reg(bitrate_data);
/* Command-line front end: build the cmdline context, optionally replay a
 * command file supplied via --cmdline-file, then run interactively. */
4366 #ifdef RTE_LIB_CMDLINE
4367 if (init_cmdline() != 0)
4368 rte_exit(EXIT_FAILURE,
4369 "Could not initialise cmdline context.\n");
4371 if (strlen(cmdline_filename) != 0)
4372 cmdline_read_from_file(cmdline_filename);
4374 if (interactive == 1) {
/* Non-interactive with auto_start: begin forwarding immediately.
 * NOTE(review): the branch structure between these two forwarding starts
 * is elided in this view — confirm against the full file. */
4376 printf("Start automatic packet forwarding\n");
4377 start_packet_forwarding(0);
4389 printf("No commandline core given, start packet forwarding\n");
4390 start_packet_forwarding(tx_first);
/* With --stats-period N, print stats every N seconds until f_quit is set
 * by the signal handler. Timing is done in TSC/timer cycles. */
4391 if (stats_period != 0) {
4392 uint64_t prev_time = 0, cur_time, diff_time = 0;
4393 uint64_t timer_period;
4395 /* Convert to number of cycles */
4396 timer_period = stats_period * rte_get_timer_hz();
4398 while (f_quit == 0) {
4399 cur_time = rte_get_timer_cycles();
4400 diff_time += cur_time - prev_time;
4402 if (diff_time >= timer_period) {
4404 /* Reset the timer */
4407 /* Sleep to avoid unnecessary checks */
4408 prev_time = cur_time;
4409 rte_delay_us_sleep(US_PER_S);
/* Without a stats period: block on stdin until the user presses enter. */
4413 printf("Press enter to exit\n");
4414 rc = read(0, &c, 1);
/* Tear down EAL resources (hugepages, vdevs, ...) before exiting. */
4420 ret = rte_eal_cleanup();
4422 rte_exit(EXIT_FAILURE,
4423 "EAL cleanup failed: %s\n", strerror(-ret));
4425 return EXIT_SUCCESS;