1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
12 #ifndef RTE_EXEC_ENV_WINDOWS
15 #include <sys/types.h>
19 #include <sys/queue.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_byteorder.h>
30 #include <rte_debug.h>
31 #include <rte_cycles.h>
32 #include <rte_memory.h>
33 #include <rte_memcpy.h>
34 #include <rte_launch.h>
36 #include <rte_alarm.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_atomic.h>
40 #include <rte_branch_prediction.h>
41 #include <rte_mempool.h>
42 #include <rte_malloc.h>
44 #include <rte_mbuf_pool_ops.h>
45 #include <rte_interrupts.h>
47 #include <rte_ether.h>
48 #include <rte_ethdev.h>
50 #include <rte_string_fns.h>
52 #include <rte_pmd_ixgbe.h>
55 #include <rte_pdump.h>
58 #include <rte_metrics.h>
59 #ifdef RTE_LIB_BITRATESTATS
60 #include <rte_bitrate.h>
62 #ifdef RTE_LIB_LATENCYSTATS
63 #include <rte_latencystats.h>
65 #ifdef RTE_EXEC_ENV_WINDOWS
72 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
73 #define HUGE_FLAG (0x40000)
75 #define HUGE_FLAG MAP_HUGETLB
78 #ifndef MAP_HUGE_SHIFT
79 /* older kernels (or FreeBSD) will not have this define */
80 #define HUGE_SHIFT (26)
82 #define HUGE_SHIFT MAP_HUGE_SHIFT
85 #define EXTMEM_HEAP_NAME "extmem"
86 #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
88 uint16_t verbose_level = 0; /**< Silent by default. */
89 int testpmd_logtype; /**< Log type for testpmd logs */
91 /* use main core for command line? */
92 uint8_t interactive = 0;
93 uint8_t auto_start = 0;
95 char cmdline_filename[PATH_MAX] = {0};
98 * NUMA support configuration.
99 * When set, the NUMA support attempts to dispatch the allocation of the
100 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
101 * probed ports among the CPU sockets 0 and 1.
102 * Otherwise, all memory is allocated from CPU socket 0.
104 uint8_t numa_support = 1; /**< numa enabled by default */
107 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not set.
110 uint8_t socket_num = UMA_NO_CONFIG;
113 * Select mempool allocation type:
114 * - native: use regular DPDK memory
115 * - anon: use regular DPDK memory to create mempool, but populate using
116 * anonymous memory (may not be IOVA-contiguous)
117 * - xmem: use externally allocated hugepage memory
119 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
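/*
 * Illustrative note (not in the original source): mp_alloc_type is normally
 * selected on the command line. Assuming the usual testpmd option spelling,
 *
 *     dpdk-testpmd -l 0-3 -n 4 -- --mp-alloc=anon
 *
 * would select MP_ALLOC_ANON before mbuf_pool_create() runs.
 */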
122 * Store the specified sockets on which the memory pools used by the ports are allocated.
125 uint8_t port_numa[RTE_MAX_ETHPORTS];
128 * Store the specified sockets on which the RX rings used by the ports are allocated.
131 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
134 * Store the specified sockets on which the TX rings used by the ports are allocated.
137 uint8_t txring_numa[RTE_MAX_ETHPORTS];
140 * Record the Ethernet address of peer target ports to which packets are forwarded.
142 * Must be instantiated with the Ethernet addresses of the peer traffic generator ports.
145 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
146 portid_t nb_peer_eth_addrs = 0;
149 * Probed Target Environment.
151 struct rte_port *ports; /**< For all probed ethernet ports. */
152 portid_t nb_ports; /**< Number of probed ethernet ports. */
153 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
154 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
156 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
159 * Test Forwarding Configuration.
160 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
161 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
163 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
164 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
165 portid_t nb_cfg_ports; /**< Number of configured ports. */
166 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
168 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
169 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
171 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
172 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
175 * Forwarding engines.
177 struct fwd_engine * fwd_engines[] = {
187 &five_tuple_swap_fwd_engine,
188 #ifdef RTE_LIBRTE_IEEE1588
189 &ieee1588_fwd_engine,
194 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
195 uint16_t mempool_flags;
197 struct fwd_config cur_fwd_config;
198 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
199 uint32_t retry_enabled;
200 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
201 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
203 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
204 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
205 DEFAULT_MBUF_DATA_SIZE
206 }; /**< Mbuf data space size. */
207 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
208 * specified on command-line. */
209 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
212 * In a container, the process running with the 'stats-period' option cannot be terminated.
213 * Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
218 * Configuration of packet segments used to scatter received packets
219 * if any of the split features is configured.
221 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
222 uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
223 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
224 uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
227 * Configuration of packet segments used by the "txonly" processing engine.
229 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
230 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
231 TXONLY_DEF_PACKET_LEN,
233 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
235 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
236 /**< Split policy for packets to TX. */
238 uint8_t txonly_multi_flow;
239 /**< Whether multiple flows are generated in TXONLY mode. */
241 uint32_t tx_pkt_times_inter;
242 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
244 uint32_t tx_pkt_times_intra;
245 /**< Timings for send scheduling in TXONLY mode, time between packets. */
247 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
248 uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
249 int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
250 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
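/*
 * Illustrative note (assumption): these defaults are usually overridden on
 * the command line, e.g. --burst=64 for nb_pkt_per_burst or --mbcache=512
 * for mb_mempool_cache.
 */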
252 /* whether the current configuration is in DCB mode; 0 means it is not */
253 uint8_t dcb_config = 0;
256 * Configurable number of RX/TX queues.
258 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
259 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
260 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
263 * Configurable number of RX/TX ring descriptors.
264 * Defaults are supplied by drivers via ethdev.
266 #define RTE_TEST_RX_DESC_DEFAULT 0
267 #define RTE_TEST_TX_DESC_DEFAULT 0
268 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
269 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
271 #define RTE_PMD_PARAM_UNSET -1
273 * Configurable values of RX and TX ring threshold registers.
276 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
277 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
278 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
280 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
281 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
282 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
285 * Configurable value of RX free threshold.
287 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
290 * Configurable value of RX drop enable.
292 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
295 * Configurable value of TX free threshold.
297 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
300 * Configurable value of TX RS bit threshold.
302 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
305 * Configurable number of packets buffered before sending.
307 uint16_t noisy_tx_sw_bufsz;
310 * Configurable value of packet buffer timeout.
312 uint16_t noisy_tx_sw_buf_flush_time;
315 * Configurable value for size of VNF internal memory area
316 * used for simulating noisy neighbour behaviour
318 uint64_t noisy_lkup_mem_sz;
321 * Configurable value of number of random writes done in
322 * VNF simulation memory area.
324 uint64_t noisy_lkup_num_writes;
327 * Configurable value of number of random reads done in
328 * VNF simulation memory area.
330 uint64_t noisy_lkup_num_reads;
333 * Configurable value of number of random reads/writes done in
334 * VNF simulation memory area.
336 uint64_t noisy_lkup_num_reads_writes;
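/*
 * Illustrative note (assumption): the four noisy-VNF knobs above are
 * typically set via command-line options such as --noisy-lkup-memory,
 * --noisy-lkup-num-writes, --noisy-lkup-num-reads and
 * --noisy-lkup-num-reads-writes, and consumed by the "noisy" forward engine.
 */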
339 * Receive Side Scaling (RSS) configuration.
341 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
344 * Port topology configuration
346 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
349 * Avoid flushing all the RX streams before starting forwarding.
351 uint8_t no_flush_rx = 0; /* flush by default */
354 * Flow API isolated mode.
356 uint8_t flow_isolate_all;
359 * Avoid checking the link status when starting/stopping a port.
361 uint8_t no_link_check = 0; /* check by default */
364 * Don't automatically start all ports in interactive mode.
366 uint8_t no_device_start = 0;
369 * Enable link status change notification
371 uint8_t lsc_interrupt = 1; /* enabled by default */
374 * Enable device removal notification.
376 uint8_t rmv_interrupt = 1; /* enabled by default */
378 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
380 /* After attach, port setup is called on event or by iterator */
381 bool setup_on_probe_event = true;
383 /* Clear ptypes on port initialization. */
384 uint8_t clear_ptypes = true;
386 /* Hairpin ports configuration mode. */
387 uint16_t hairpin_mode;
389 /* Pretty printing of ethdev events */
390 static const char * const eth_event_desc[] = {
391 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
392 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
393 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
394 [RTE_ETH_EVENT_INTR_RESET] = "reset",
395 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
396 [RTE_ETH_EVENT_IPSEC] = "IPsec",
397 [RTE_ETH_EVENT_MACSEC] = "MACsec",
398 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
399 [RTE_ETH_EVENT_NEW] = "device probed",
400 [RTE_ETH_EVENT_DESTROY] = "device released",
401 [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
402 [RTE_ETH_EVENT_MAX] = NULL,
406 * Display or mask ether events.
407 * Defaults to all events except VF_MBOX, NEW and DESTROY.
409 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
410 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
411 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
412 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
413 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
414 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
415 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
416 (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
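/*
 * Sketch (added for illustration, based on the mask semantics above): an
 * event handler would gate its output before printing, e.g.
 *
 *     if (event_print_mask & (UINT32_C(1) << type))
 *             printf("Port %u: %s event\n", port_id, eth_event_desc[type]);
 */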
418 * Decide if all memory is locked for performance.
423 * NIC bypass mode configuration options.
426 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
427 /* The NIC bypass watchdog timeout. */
428 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
432 #ifdef RTE_LIB_LATENCYSTATS
435 * Set when latency stats are enabled on the command line.
437 uint8_t latencystats_enabled;
440 * Lcore ID to serve latency statistics.
442 lcoreid_t latencystats_lcore_id = -1;
447 * Ethernet device configuration.
449 struct rte_eth_rxmode rx_mode = {
450 /* Default maximum frame length.
451 * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead"
457 struct rte_eth_txmode tx_mode = {
458 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
461 struct rte_fdir_conf fdir_conf = {
462 .mode = RTE_FDIR_MODE_NONE,
463 .pballoc = RTE_FDIR_PBALLOC_64K,
464 .status = RTE_FDIR_REPORT_STATUS,
466 .vlan_tci_mask = 0xFFEF,
468 .src_ip = 0xFFFFFFFF,
469 .dst_ip = 0xFFFFFFFF,
472 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
473 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
475 .src_port_mask = 0xFFFF,
476 .dst_port_mask = 0xFFFF,
477 .mac_addr_byte_mask = 0xFF,
478 .tunnel_type_mask = 1,
479 .tunnel_id_mask = 0xFFFFFFFF,
484 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
487 * Display zero values by default for xstats
489 uint8_t xstats_hide_zero;
492 * Measurement of CPU cycles disabled by default
494 uint8_t record_core_cycles;
497 * Display of RX and TX bursts disabled by default
499 uint8_t record_burst_stats;
501 unsigned int num_sockets = 0;
502 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
504 #ifdef RTE_LIB_BITRATESTATS
505 /* Bitrate statistics */
506 struct rte_stats_bitrates *bitrate_data;
507 lcoreid_t bitrate_lcore_id;
508 uint8_t bitrate_enabled;
511 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
512 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
515 * Hexadecimal bitmask of the RX mq modes that can be enabled.
517 enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
520 * Used to set forced link speed
522 uint32_t eth_link_speed;
525 * ID of the current process in multi-process, used to
526 * configure the queues to be polled.
531 * Number of processes in multi-process, used to
532 * configure the queues to be polled.
534 unsigned int num_procs = 1;
537 eth_rx_metadata_negotiate_mp(uint16_t port_id)
539 uint64_t rx_meta_features = 0;
542 if (!is_proc_primary())
545 rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG;
546 rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK;
547 rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID;
549 ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features);
551 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) {
552 TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n",
556 if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) {
557 TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n",
561 if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) {
562 TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n",
565 } else if (ret != -ENOTSUP) {
566 rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n",
567 port_id, rte_strerror(-ret));
572 flow_pick_transfer_proxy_mp(uint16_t port_id)
574 struct rte_port *port = &ports[port_id];
577 port->flow_transfer_proxy = port_id;
579 if (!is_proc_primary())
582 ret = rte_flow_pick_transfer_proxy(port_id, &port->flow_transfer_proxy,
585 fprintf(stderr, "Error picking flow transfer proxy for port %u: %s - ignore\n",
586 port_id, rte_strerror(-ret));
591 eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
592 const struct rte_eth_conf *dev_conf)
594 if (is_proc_primary())
595 return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
601 eth_dev_start_mp(uint16_t port_id)
603 if (is_proc_primary())
604 return rte_eth_dev_start(port_id);
610 eth_dev_stop_mp(uint16_t port_id)
612 if (is_proc_primary())
613 return rte_eth_dev_stop(port_id);
619 mempool_free_mp(struct rte_mempool *mp)
621 if (is_proc_primary())
622 rte_mempool_free(mp);
626 eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
628 if (is_proc_primary())
629 return rte_eth_dev_set_mtu(port_id, mtu);
634 /* Forward function declarations */
635 static void setup_attached_port(portid_t pi);
636 static void check_all_ports_link_status(uint32_t port_mask);
637 static int eth_event_callback(portid_t port_id,
638 enum rte_eth_event_type type,
639 void *param, void *ret_param);
640 static void dev_event_callback(const char *device_name,
641 enum rte_dev_event_type type,
645 * Check if all the ports are started.
646 * If yes, return positive value. If not, return zero.
648 static int all_ports_started(void);
650 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
651 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
653 /* Holds the registered mbuf dynamic flags names. */
654 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
657 * Helper function to check if a socket is new (not yet discovered).
658 * If new, return positive value. If already discovered, return zero.
661 new_socket_id(unsigned int socket_id)
665 for (i = 0; i < num_sockets; i++) {
666 if (socket_ids[i] == socket_id)
673 * Setup default configuration.
676 set_default_fwd_lcores_config(void)
680 unsigned int sock_num;
683 for (i = 0; i < RTE_MAX_LCORE; i++) {
684 if (!rte_lcore_is_enabled(i))
686 sock_num = rte_lcore_to_socket_id(i);
687 if (new_socket_id(sock_num)) {
688 if (num_sockets >= RTE_MAX_NUMA_NODES) {
689 rte_exit(EXIT_FAILURE,
690 "Total sockets greater than %u\n",
693 socket_ids[num_sockets++] = sock_num;
695 if (i == rte_get_main_lcore())
697 fwd_lcores_cpuids[nb_lc++] = i;
699 nb_lcores = (lcoreid_t) nb_lc;
700 nb_cfg_lcores = nb_lcores;
705 set_def_peer_eth_addrs(void)
709 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
710 peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
711 peer_eth_addrs[i].addr_bytes[5] = i;
716 set_default_fwd_ports_config(void)
721 RTE_ETH_FOREACH_DEV(pt_id) {
722 fwd_ports_ids[i++] = pt_id;
724 /* Update sockets info according to the attached device */
725 int socket_id = rte_eth_dev_socket_id(pt_id);
726 if (socket_id >= 0 && new_socket_id(socket_id)) {
727 if (num_sockets >= RTE_MAX_NUMA_NODES) {
728 rte_exit(EXIT_FAILURE,
729 "Total sockets greater than %u\n",
732 socket_ids[num_sockets++] = socket_id;
736 nb_cfg_ports = nb_ports;
737 nb_fwd_ports = nb_ports;
741 set_def_fwd_config(void)
743 set_default_fwd_lcores_config();
744 set_def_peer_eth_addrs();
745 set_default_fwd_ports_config();
748 #ifndef RTE_EXEC_ENV_WINDOWS
749 /* extremely pessimistic estimation of memory required to create a mempool */
751 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
753 unsigned int n_pages, mbuf_per_pg, leftover;
754 uint64_t total_mem, mbuf_mem, obj_sz;
756 /* there is no good way to predict how much space the mempool will
757 * occupy because it will allocate chunks on the fly, and some of those
758 * will come from default DPDK memory while some will come from our
759 * external memory, so just assume 128MB will be enough for everyone.
761 uint64_t hdr_mem = 128 << 20;
763 /* account for possible non-contiguousness */
764 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
766 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
770 mbuf_per_pg = pgsz / obj_sz;
771 leftover = (nb_mbufs % mbuf_per_pg) > 0;
772 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
774 mbuf_mem = n_pages * pgsz;
776 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
778 if (total_mem > SIZE_MAX) {
779 TESTPMD_LOG(ERR, "Memory size too big\n");
782 *out = (size_t)total_mem;
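/*
 * Worked example (hypothetical numbers): for nb_mbufs = 180224 and a 2MB
 * page size, assuming rte_mempool_calc_obj_size() returns roughly 2880 bytes
 * per object, mbuf_per_pg = 2097152 / 2880 = 728, n_pages = 180224 / 728
 * rounded up = 248, mbuf_mem = 248 * 2MB = 496MB, and total_mem is
 * 496MB + 128MB of header slack, aligned up to the page size.
 */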
788 pagesz_flags(uint64_t page_sz)
790 /* as per mmap() manpage, all page sizes are log2 of page size
791 * shifted by MAP_HUGE_SHIFT
793 int log2 = rte_log2_u64(page_sz);
795 return (log2 << HUGE_SHIFT);
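/*
 * Example (illustrative): a 2MB page gives rte_log2_u64(2097152) = 21, so
 * the result is 21 << HUGE_SHIFT, which equals Linux's MAP_HUGE_2MB when
 * HUGE_SHIFT is MAP_HUGE_SHIFT (26). A 1GB page yields 30 << 26, MAP_HUGE_1GB.
 */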
799 alloc_mem(size_t memsz, size_t pgsz, bool huge)
804 /* allocate anonymous hugepages */
805 flags = MAP_ANONYMOUS | MAP_PRIVATE;
807 flags |= HUGE_FLAG | pagesz_flags(pgsz);
809 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
810 if (addr == MAP_FAILED)
816 struct extmem_param {
820 rte_iova_t *iova_table;
821 unsigned int iova_table_len;
825 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
828 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
829 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
830 unsigned int cur_page, n_pages, pgsz_idx;
831 size_t mem_sz, cur_pgsz;
832 rte_iova_t *iovas = NULL;
836 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
837 /* skip anything that is too big */
838 if (pgsizes[pgsz_idx] > SIZE_MAX)
841 cur_pgsz = pgsizes[pgsz_idx];
843 /* if we were told not to allocate hugepages, override */
845 cur_pgsz = sysconf(_SC_PAGESIZE);
847 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
849 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
853 /* allocate our memory */
854 addr = alloc_mem(mem_sz, cur_pgsz, huge);
856 /* if we couldn't allocate memory with a specified page size,
857 * that doesn't mean we can't do it with other page sizes, so
863 /* store IOVA addresses for every page in this memory area */
864 n_pages = mem_sz / cur_pgsz;
866 iovas = malloc(sizeof(*iovas) * n_pages);
869 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
872 /* lock memory if it's not huge pages */
876 /* populate IOVA addresses */
877 for (cur_page = 0; cur_page < n_pages; cur_page++) {
882 offset = cur_pgsz * cur_page;
883 cur = RTE_PTR_ADD(addr, offset);
885 /* touch the page before getting its IOVA */
886 *(volatile char *)cur = 0;
888 iova = rte_mem_virt2iova(cur);
890 iovas[cur_page] = iova;
895 /* if we couldn't allocate anything */
901 param->pgsz = cur_pgsz;
902 param->iova_table = iovas;
903 param->iova_table_len = n_pages;
910 munmap(addr, mem_sz);
916 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
918 struct extmem_param param;
921 memset(&param, 0, sizeof(param));
923 /* check if our heap exists */
924 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
926 /* create our heap */
927 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
929 TESTPMD_LOG(ERR, "Cannot create heap\n");
934 ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
936 TESTPMD_LOG(ERR, "Cannot create memory area\n");
940 /* we now have a valid memory area, so add it to heap */
941 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
942 param.addr, param.len, param.iova_table,
943 param.iova_table_len, param.pgsz);
945 /* when using VFIO, memory is automatically mapped for DMA by EAL */
947 /* not needed any more */
948 free(param.iova_table);
951 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
952 munmap(param.addr, param.len);
958 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
964 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
965 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
970 RTE_ETH_FOREACH_DEV(pid) {
971 struct rte_eth_dev_info dev_info;
973 ret = eth_dev_info_get_print_err(pid, &dev_info);
976 "unable to get device info for port %d on addr 0x%p,"
977 "mempool unmapping will not be performed\n",
982 ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
985 "unable to DMA unmap addr 0x%p "
987 memhdr->addr, dev_info.device->name);
990 ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
993 "unable to un-register addr 0x%p\n", memhdr->addr);
998 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
999 struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
1002 size_t page_size = sysconf(_SC_PAGESIZE);
1005 ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
1009 "unable to register addr 0x%p\n", memhdr->addr);
1012 RTE_ETH_FOREACH_DEV(pid) {
1013 struct rte_eth_dev_info dev_info;
1015 ret = eth_dev_info_get_print_err(pid, &dev_info);
1018 "unable to get device info for port %d on addr 0x%p,"
1019 "mempool mapping will not be performed\n",
1023 ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
1026 "unable to DMA map addr 0x%p "
1028 memhdr->addr, dev_info.device->name);
1035 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
1036 char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
1038 struct rte_pktmbuf_extmem *xmem;
1039 unsigned int ext_num, zone_num, elt_num;
1042 elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
1043 elt_num = EXTBUF_ZONE_SIZE / elt_size;
1044 zone_num = (nb_mbufs + elt_num - 1) / elt_num;
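	/*
	 * Worked example (hypothetical numbers): for mbuf_sz = 2176, which is
	 * already cache-line aligned, elt_size = 2176, one 2MB zone holds
	 * 2097152 / 2176 = 963 elements, and 180224 mbufs need
	 * (180224 + 962) / 963 = 188 zones.
	 */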
1046 xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
1048 TESTPMD_LOG(ERR, "Cannot allocate memory for "
1049 "external buffer descriptors\n");
1053 for (ext_num = 0; ext_num < zone_num; ext_num++) {
1054 struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
1055 const struct rte_memzone *mz;
1056 char mz_name[RTE_MEMZONE_NAMESIZE];
1059 ret = snprintf(mz_name, sizeof(mz_name),
1060 RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
1061 if (ret < 0 || ret >= (int)sizeof(mz_name)) {
1062 errno = ENAMETOOLONG;
1066 mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
1068 RTE_MEMZONE_IOVA_CONTIG |
1070 RTE_MEMZONE_SIZE_HINT_ONLY,
1074 * The caller exits on external buffer creation
1075 * error, so there is no need to free memzones.
1081 xseg->buf_ptr = mz->addr;
1082 xseg->buf_iova = mz->iova;
1083 xseg->buf_len = EXTBUF_ZONE_SIZE;
1084 xseg->elt_size = elt_size;
1086 if (ext_num == 0 && xmem != NULL) {
1095 * Configuration initialisation done once at init time.
1097 static struct rte_mempool *
1098 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
1099 unsigned int socket_id, uint16_t size_idx)
1101 char pool_name[RTE_MEMPOOL_NAMESIZE];
1102 struct rte_mempool *rte_mp = NULL;
1103 #ifndef RTE_EXEC_ENV_WINDOWS
1106 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1108 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1109 if (!is_proc_primary()) {
1110 rte_mp = rte_mempool_lookup(pool_name);
1112 rte_exit(EXIT_FAILURE,
1113 "Get mbuf pool for socket %u failed: %s\n",
1114 socket_id, rte_strerror(rte_errno));
1119 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1120 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1122 switch (mp_alloc_type) {
1123 case MP_ALLOC_NATIVE:
1125 /* wrapper to rte_mempool_create() */
1126 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1127 rte_mbuf_best_mempool_ops());
1128 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1129 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1132 #ifndef RTE_EXEC_ENV_WINDOWS
1135 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1136 mb_size, (unsigned int) mb_mempool_cache,
1137 sizeof(struct rte_pktmbuf_pool_private),
1138 socket_id, mempool_flags);
1142 if (rte_mempool_populate_anon(rte_mp) == 0) {
1143 rte_mempool_free(rte_mp);
1147 rte_pktmbuf_pool_init(rte_mp, NULL);
1148 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1149 rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1153 case MP_ALLOC_XMEM_HUGE:
1156 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1158 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1159 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1162 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1163 if (heap_socket < 0)
1164 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1166 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1167 rte_mbuf_best_mempool_ops());
1168 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1169 mb_mempool_cache, 0, mbuf_seg_size,
1176 struct rte_pktmbuf_extmem *ext_mem;
1177 unsigned int ext_num;
1179 ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
1180 socket_id, pool_name, &ext_mem);
1182 rte_exit(EXIT_FAILURE,
1183 "Can't create pinned data buffers\n");
1185 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1186 rte_mbuf_best_mempool_ops());
1187 rte_mp = rte_pktmbuf_pool_create_extbuf
1188 (pool_name, nb_mbuf, mb_mempool_cache,
1189 0, mbuf_seg_size, socket_id,
1196 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1200 #ifndef RTE_EXEC_ENV_WINDOWS
1203 if (rte_mp == NULL) {
1204 rte_exit(EXIT_FAILURE,
1205 "Creation of mbuf pool for socket %u failed: %s\n",
1206 socket_id, rte_strerror(rte_errno));
1207 } else if (verbose_level > 0) {
1208 rte_mempool_dump(stdout, rte_mp);
1214 * Check whether the given socket ID is valid in NUMA mode.
1215 * If valid, return 0, else return -1.
1218 check_socket_id(const unsigned int socket_id)
1220 static int warning_once = 0;
1222 if (new_socket_id(socket_id)) {
1223 if (!warning_once && numa_support)
1225 "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
1233 * Get the allowed maximum number of RX queues.
1234 * *pid returns the port id which has the minimal value of
1235 * max_rx_queues among all ports.
1238 get_allowed_max_nb_rxq(portid_t *pid)
1240 queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1241 bool max_rxq_valid = false;
1243 struct rte_eth_dev_info dev_info;
1245 RTE_ETH_FOREACH_DEV(pi) {
1246 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1249 max_rxq_valid = true;
1250 if (dev_info.max_rx_queues < allowed_max_rxq) {
1251 allowed_max_rxq = dev_info.max_rx_queues;
1255 return max_rxq_valid ? allowed_max_rxq : 0;
1259 * Check whether the input rxq is valid.
1260 * It is valid if it is not greater than the maximum number
1261 * of RX queues of any port.
1262 * If valid, return 0, else return -1.
1265 check_nb_rxq(queueid_t rxq)
1267 queueid_t allowed_max_rxq;
1270 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1271 if (rxq > allowed_max_rxq) {
1273 "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
1274 rxq, allowed_max_rxq, pid);
1281 * Get the allowed maximum number of TX queues.
1282 * *pid returns the port id which has the minimal value of
1283 * max_tx_queues among all ports.
1286 get_allowed_max_nb_txq(portid_t *pid)
1288 queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1289 bool max_txq_valid = false;
1291 struct rte_eth_dev_info dev_info;
1293 RTE_ETH_FOREACH_DEV(pi) {
1294 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1297 max_txq_valid = true;
1298 if (dev_info.max_tx_queues < allowed_max_txq) {
1299 allowed_max_txq = dev_info.max_tx_queues;
1303 return max_txq_valid ? allowed_max_txq : 0;
1307 * Check whether the input txq is valid.
1308 * It is valid if it is not greater than the maximum number
1309 * of TX queues of any port.
1310 * If valid, return 0, else return -1.
1313 check_nb_txq(queueid_t txq)
1315 queueid_t allowed_max_txq;
1318 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1319 if (txq > allowed_max_txq) {
1321 "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
1322 txq, allowed_max_txq, pid);
1329 * Get the allowed maximum number of RXDs of every rx queue.
1330 * *pid returns the port id which has the minimal value of
1331 * max_rxd in all queues of all ports.
1334 get_allowed_max_nb_rxd(portid_t *pid)
1336 uint16_t allowed_max_rxd = UINT16_MAX;
1338 struct rte_eth_dev_info dev_info;
1340 RTE_ETH_FOREACH_DEV(pi) {
1341 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1344 if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1345 allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1349 return allowed_max_rxd;
1353 * Get the allowed minimal number of RXDs of every rx queue.
1354 * *pid returns the port id which has the highest value of
1355 * min_rxd in all queues of all ports.
1358 get_allowed_min_nb_rxd(portid_t *pid)
1360 uint16_t allowed_min_rxd = 0;
1362 struct rte_eth_dev_info dev_info;
1364 RTE_ETH_FOREACH_DEV(pi) {
1365 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1368 if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1369 allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1374 return allowed_min_rxd;
1378 * Check whether the input rxd is valid.
1379 * It is valid if it is not greater than the maximum number
1380 * of RXDs of any Rx queue and not less than the
1381 * minimal number of RXDs of any Rx queue.
1382 * If valid, return 0, else return -1.
1385 check_nb_rxd(queueid_t rxd)
1387 uint16_t allowed_max_rxd;
1388 uint16_t allowed_min_rxd;
1391 allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1392 if (rxd > allowed_max_rxd) {
1394 "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
1395 rxd, allowed_max_rxd, pid);
1399 allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1400 if (rxd < allowed_min_rxd) {
1402 "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
1403 rxd, allowed_min_rxd, pid);
1411 * Get the allowed maximum number of TXDs of every tx queue.
1412 * *pid returns the port id which has the minimal value of
1413 * max_txd across all tx queues.
1416 get_allowed_max_nb_txd(portid_t *pid)
1418 uint16_t allowed_max_txd = UINT16_MAX;
1420 struct rte_eth_dev_info dev_info;
1422 RTE_ETH_FOREACH_DEV(pi) {
1423 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1426 if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1427 allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1431 return allowed_max_txd;
1435 * Get the allowed minimal number of TXDs of every tx queue.
1436 * *pid returns the port id which has the highest value of
1437 * min_txd across all tx queues.
1440 get_allowed_min_nb_txd(portid_t *pid)
1442 uint16_t allowed_min_txd = 0;
1444 struct rte_eth_dev_info dev_info;
1446 RTE_ETH_FOREACH_DEV(pi) {
1447 if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1450 if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1451 allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1456 return allowed_min_txd;
1460 * Check whether the input txd is valid.
1461 * It is valid if it is not greater than the maximum number
1462 * of TXDs of any Tx queue and not less than the minimal number of TXDs of any Tx queue.
1463 * If valid, return 0, else return -1.
1466 check_nb_txd(queueid_t txd)
1468 uint16_t allowed_max_txd;
1469 uint16_t allowed_min_txd;
1472 allowed_max_txd = get_allowed_max_nb_txd(&pid);
1473 if (txd > allowed_max_txd) {
1475 "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
1476 txd, allowed_max_txd, pid);
1480 allowed_min_txd = get_allowed_min_nb_txd(&pid);
1481 if (txd < allowed_min_txd) {
1483 "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
1484 txd, allowed_min_txd, pid);
1492 * Get the allowed maximum number of hairpin queues.
1493 * *pid returns the port id which has the minimal value of
1494 * max_hairpin_queues among all ports.
1497 get_allowed_max_nb_hairpinq(portid_t *pid)
1499 queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1501 struct rte_eth_hairpin_cap cap;
1503 RTE_ETH_FOREACH_DEV(pi) {
1504 if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1508 if (cap.max_nb_queues < allowed_max_hairpinq) {
1509 allowed_max_hairpinq = cap.max_nb_queues;
1513 return allowed_max_hairpinq;
1517 * Check whether the input hairpinq is valid.
1518 * It is valid if it is not greater than the maximum number
1519 * of hairpin queues of any port.
1520 * If valid, return 0, else return -1.
1523 check_nb_hairpinq(queueid_t hairpinq)
1525 queueid_t allowed_max_hairpinq;
1528 allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1529 if (hairpinq > allowed_max_hairpinq) {
1531 "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
1532 hairpinq, allowed_max_hairpinq, pid);
1539 init_config_port_offloads(portid_t pid, uint32_t socket_id)
1541 struct rte_port *port = &ports[pid];
1546 eth_rx_metadata_negotiate_mp(pid);
1547 flow_pick_transfer_proxy_mp(pid);
1549 port->dev_conf.txmode = tx_mode;
1550 port->dev_conf.rxmode = rx_mode;
1552 ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1554 rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1556 ret = update_jumbo_frame_offload(pid);
1559 "Updating jumbo frame offload failed for port %u\n",
1562 if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1563 port->dev_conf.txmode.offloads &=
1564 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1566 /* Apply Rx offloads configuration */
1567 for (i = 0; i < port->dev_info.max_rx_queues; i++)
1568 port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
1569 /* Apply Tx offloads configuration */
1570 for (i = 0; i < port->dev_info.max_tx_queues; i++)
1571 port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
1574 port->dev_conf.link_speeds = eth_link_speed;
1576 /* set flag to initialize port/queue */
1577 port->need_reconfig = 1;
1578 port->need_reconfig_queues = 1;
1579 port->socket_id = socket_id;
1580 port->tx_metadata = 0;
1583 * Check for maximum number of segments per MTU.
1584 * Accordingly update the mbuf data size.
1586 if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1587 port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1588 data_size = rx_mode.max_rx_pkt_len /
1589 port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1591 if ((data_size + RTE_PKTMBUF_HEADROOM) > mbuf_data_size[0]) {
1592 mbuf_data_size[0] = data_size + RTE_PKTMBUF_HEADROOM;
1593 TESTPMD_LOG(WARNING,
1594 "Configured mbuf size of the first segment %hu\n",
1604 struct rte_mempool *mbp;
1605 unsigned int nb_mbuf_per_pool;
1607 struct rte_gro_param gro_param;
1610 /* Configuration of logical cores. */
1611 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1612 sizeof(struct fwd_lcore *) * nb_lcores,
1613 RTE_CACHE_LINE_SIZE);
1614 if (fwd_lcores == NULL) {
1615 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1616 "failed\n", nb_lcores);
1618 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1619 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1620 sizeof(struct fwd_lcore),
1621 RTE_CACHE_LINE_SIZE);
1622 if (fwd_lcores[lc_id] == NULL) {
1623 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1626 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1629 RTE_ETH_FOREACH_DEV(pid) {
1633 socket_id = port_numa[pid];
1634 if (port_numa[pid] == NUMA_NO_CONFIG) {
1635 socket_id = rte_eth_dev_socket_id(pid);
1638 * if socket_id is invalid,
1639 * set to the first available socket.
1641 if (check_socket_id(socket_id) < 0)
1642 socket_id = socket_ids[0];
1645 socket_id = (socket_num == UMA_NO_CONFIG) ?
1648 /* Apply default TxRx configuration for all ports */
1649 init_config_port_offloads(pid, socket_id);
1652 * Create mbuf pools.
1653 * If NUMA support is disabled, create a single mbuf pool in
1654 * socket 0 memory by default.
1655 * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
1657 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
1658 * nb_txd can be configured at run time.
1660 if (param_total_num_mbufs)
1661 nb_mbuf_per_pool = param_total_num_mbufs;
1663 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1664 (nb_lcores * mb_mempool_cache) +
1665 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1666 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
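	/*
	 * Worked example (assuming the usual testpmd defaults of
	 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512 and mb_mempool_cache = 250): with 4 lcores,
	 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs per port slot,
	 * then scaled by RTE_MAX_ETHPORTS.
	 */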
1672 for (i = 0; i < num_sockets; i++)
1673 for (j = 0; j < mbuf_data_size_n; j++)
1674 mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1675 mbuf_pool_create(mbuf_data_size[j],
1681 for (i = 0; i < mbuf_data_size_n; i++)
1682 mempools[i] = mbuf_pool_create
1685 socket_num == UMA_NO_CONFIG ?
1691 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1692 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1694 * Record which mbuf pool each logical core should use, if needed.
1696 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1697 mbp = mbuf_pool_find(
1698 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1701 mbp = mbuf_pool_find(0, 0);
1702 fwd_lcores[lc_id]->mbp = mbp;
1703 /* initialize GSO context */
1704 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1705 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1706 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1707 fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1709 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1714 /* create a gro context for each lcore */
1715 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1716 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1717 gro_param.max_item_per_flow = MAX_PKT_BURST;
1718 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1719 gro_param.socket_id = rte_lcore_to_socket_id(
1720 fwd_lcores_cpuids[lc_id]);
1721 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1722 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1723 rte_exit(EXIT_FAILURE,
1724 "rte_gro_ctx_create() failed\n");
1731 reconfig(portid_t new_port_id, unsigned socket_id)
1733 /* Reconfiguration of Ethernet ports. */
1734 init_config_port_offloads(new_port_id, socket_id);
1740 init_fwd_streams(void)
1743 struct rte_port *port;
1744 streamid_t sm_id, nb_fwd_streams_new;
1747 /* set socket id according to numa or not */
1748 RTE_ETH_FOREACH_DEV(pid) {
1750 if (nb_rxq > port->dev_info.max_rx_queues) {
1752 "Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
1753 nb_rxq, port->dev_info.max_rx_queues);
1756 if (nb_txq > port->dev_info.max_tx_queues) {
1758 "Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
1759 nb_txq, port->dev_info.max_tx_queues);
1763 if (port_numa[pid] != NUMA_NO_CONFIG)
1764 port->socket_id = port_numa[pid];
1766 port->socket_id = rte_eth_dev_socket_id(pid);
1769 * if socket_id is invalid,
1770 * set to the first available socket.
1772 if (check_socket_id(port->socket_id) < 0)
1773 port->socket_id = socket_ids[0];
1777 if (socket_num == UMA_NO_CONFIG)
1778 port->socket_id = 0;
1780 port->socket_id = socket_num;
1784 q = RTE_MAX(nb_rxq, nb_txq);
1787 "Fail: Cannot allocate fwd streams as number of queues is 0\n");
1790 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1791 if (nb_fwd_streams_new == nb_fwd_streams)
1794 if (fwd_streams != NULL) {
1795 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1796 if (fwd_streams[sm_id] == NULL)
1798 rte_free(fwd_streams[sm_id]);
1799 fwd_streams[sm_id] = NULL;
1801 rte_free(fwd_streams);
1806 nb_fwd_streams = nb_fwd_streams_new;
1807 if (nb_fwd_streams) {
1808 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1809 sizeof(struct fwd_stream *) * nb_fwd_streams,
1810 RTE_CACHE_LINE_SIZE);
1811 if (fwd_streams == NULL)
1812 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1813 " (struct fwd_stream *)) failed\n",
1816 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1817 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1818 " struct fwd_stream", sizeof(struct fwd_stream),
1819 RTE_CACHE_LINE_SIZE);
1820 if (fwd_streams[sm_id] == NULL)
1821 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1822 "(struct fwd_stream) failed\n");
1830 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1832 uint64_t total_burst, sburst;
1834 uint64_t burst_stats[4];
1835 uint16_t pktnb_stats[4];
1837 int burst_percent[4], sburstp;
1841 * First compute the total number of packet bursts and the
1842 * two highest numbers of bursts of the same number of packets.
1844 memset(&burst_stats, 0x0, sizeof(burst_stats));
1845 memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1847 /* Show stats for 0 burst size always */
1848 total_burst = pbs->pkt_burst_spread[0];
1849 burst_stats[0] = pbs->pkt_burst_spread[0];
1852 /* Find the next 2 burst sizes with highest occurrences. */
1853 for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1854 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1859 total_burst += nb_burst;
1861 if (nb_burst > burst_stats[1]) {
1862 burst_stats[2] = burst_stats[1];
1863 pktnb_stats[2] = pktnb_stats[1];
1864 burst_stats[1] = nb_burst;
1865 pktnb_stats[1] = nb_pkt;
1866 } else if (nb_burst > burst_stats[2]) {
1867 burst_stats[2] = nb_burst;
1868 pktnb_stats[2] = nb_pkt;
1871 if (total_burst == 0)
1874 printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1875 for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1877 printf("%d%% of other]\n", 100 - sburstp);
1881 sburst += burst_stats[i];
1882 if (sburst == total_burst) {
1883 printf("%d%% of %d pkts]\n",
1884 100 - sburstp, (int) pktnb_stats[i]);
1889 (double)burst_stats[i] / total_burst * 100;
1890 printf("%d%% of %d pkts + ",
1891 burst_percent[i], (int) pktnb_stats[i]);
1892 sburstp += burst_percent[i];
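/*
 * Illustrative output (hypothetical numbers): a run dominated by full bursts
 * might print
 *
 *     RX-bursts : 1000000 [98% of 32 pkts + 1% of 0 pkts + 1% of other]
 *
 * i.e. the total burst count, the two most frequent burst sizes, and a
 * remainder bucket.
 */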
1897 fwd_stream_stats_display(streamid_t stream_id)
1899 struct fwd_stream *fs;
1900 static const char *fwd_top_stats_border = "-------";
1902 fs = fwd_streams[stream_id];
1903 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1904 (fs->fwd_dropped == 0))
1906 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1907 "TX Port=%2d/Queue=%2d %s\n",
1908 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1909 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1910 printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1911 " TX-dropped: %-14"PRIu64,
1912 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1914 /* if checksum mode */
1915 if (cur_fwd_eng == &csum_fwd_engine) {
1916 printf(" RX- bad IP checksum: %-14"PRIu64
1917 " Rx- bad L4 checksum: %-14"PRIu64
1918 " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1919 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1920 fs->rx_bad_outer_l4_csum);
1921 printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
1922 fs->rx_bad_outer_ip_csum);
1927 if (record_burst_stats) {
1928 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1929 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1934 fwd_stats_display(void)
1936 static const char *fwd_stats_border = "----------------------";
1937 static const char *acc_stats_border = "+++++++++++++++";
1939 struct fwd_stream *rx_stream;
1940 struct fwd_stream *tx_stream;
1941 uint64_t tx_dropped;
1942 uint64_t rx_bad_ip_csum;
1943 uint64_t rx_bad_l4_csum;
1944 uint64_t rx_bad_outer_l4_csum;
1945 uint64_t rx_bad_outer_ip_csum;
1946 } ports_stats[RTE_MAX_ETHPORTS];
1947 uint64_t total_rx_dropped = 0;
1948 uint64_t total_tx_dropped = 0;
1949 uint64_t total_rx_nombuf = 0;
1950 struct rte_eth_stats stats;
1951 uint64_t fwd_cycles = 0;
1952 uint64_t total_recv = 0;
1953 uint64_t total_xmit = 0;
1954 struct rte_port *port;
1959 memset(ports_stats, 0, sizeof(ports_stats));
1961 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1962 struct fwd_stream *fs = fwd_streams[sm_id];
1964 if (cur_fwd_config.nb_fwd_streams >
1965 cur_fwd_config.nb_fwd_ports) {
1966 fwd_stream_stats_display(sm_id);
1968 ports_stats[fs->tx_port].tx_stream = fs;
1969 ports_stats[fs->rx_port].rx_stream = fs;
1972 ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1974 ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1975 ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1976 ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1977 fs->rx_bad_outer_l4_csum;
1978 ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
1979 fs->rx_bad_outer_ip_csum;
1981 if (record_core_cycles)
1982 fwd_cycles += fs->core_cycles;
1984 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1985 pt_id = fwd_ports_ids[i];
1986 port = &ports[pt_id];
1988 rte_eth_stats_get(pt_id, &stats);
1989 stats.ipackets -= port->stats.ipackets;
1990 stats.opackets -= port->stats.opackets;
1991 stats.ibytes -= port->stats.ibytes;
1992 stats.obytes -= port->stats.obytes;
1993 stats.imissed -= port->stats.imissed;
1994 stats.oerrors -= port->stats.oerrors;
1995 stats.rx_nombuf -= port->stats.rx_nombuf;
1997 total_recv += stats.ipackets;
1998 total_xmit += stats.opackets;
1999 total_rx_dropped += stats.imissed;
2000 total_tx_dropped += ports_stats[pt_id].tx_dropped;
2001 total_tx_dropped += stats.oerrors;
2002 total_rx_nombuf += stats.rx_nombuf;
2004 printf("\n %s Forward statistics for port %-2d %s\n",
2005 fwd_stats_border, pt_id, fwd_stats_border);
2007 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
2008 "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
2009 stats.ipackets + stats.imissed);
2011 if (cur_fwd_eng == &csum_fwd_engine) {
2012 printf(" Bad-ipcsum: %-14"PRIu64
2013 " Bad-l4csum: %-14"PRIu64
2014 "Bad-outer-l4csum: %-14"PRIu64"\n",
2015 ports_stats[pt_id].rx_bad_ip_csum,
2016 ports_stats[pt_id].rx_bad_l4_csum,
2017 ports_stats[pt_id].rx_bad_outer_l4_csum);
2018 printf(" Bad-outer-ipcsum: %-14"PRIu64"\n",
2019 ports_stats[pt_id].rx_bad_outer_ip_csum);
2021 if (stats.ierrors + stats.rx_nombuf > 0) {
2022 printf(" RX-error: %-"PRIu64"\n", stats.ierrors);
2023 printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
2026 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
2027 "TX-total: %-"PRIu64"\n",
2028 stats.opackets, ports_stats[pt_id].tx_dropped,
2029 stats.opackets + ports_stats[pt_id].tx_dropped);
2031 if (record_burst_stats) {
2032 if (ports_stats[pt_id].rx_stream)
2033 pkt_burst_stats_display("RX",
2034 &ports_stats[pt_id].rx_stream->rx_burst_stats);
2035 if (ports_stats[pt_id].tx_stream)
2036 pkt_burst_stats_display("TX",
2037 &ports_stats[pt_id].tx_stream->tx_burst_stats);
2040 printf(" %s--------------------------------%s\n",
2041 fwd_stats_border, fwd_stats_border);
2044 printf("\n %s Accumulated forward statistics for all ports"
2046 acc_stats_border, acc_stats_border);
2047 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
2049 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
2051 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
2052 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
2053 if (total_rx_nombuf > 0)
2054 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
2055 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
2057 acc_stats_border, acc_stats_border);
2058 if (record_core_cycles) {
2059 #define CYC_PER_MHZ 1E6
2060 if (total_recv > 0 || total_xmit > 0) {
2061 uint64_t total_pkts = 0;
2062 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
2063 strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
2064 total_pkts = total_xmit;
2066 total_pkts = total_recv;
2068 printf("\n CPU cycles/packet=%.2F (total cycles="
2069 "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
2071 (double) fwd_cycles / total_pkts,
2072 fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
2073 (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
2079 fwd_stats_reset(void)
2085 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2086 pt_id = fwd_ports_ids[i];
2087 rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2089 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2090 struct fwd_stream *fs = fwd_streams[sm_id];
2094 fs->fwd_dropped = 0;
2095 fs->rx_bad_ip_csum = 0;
2096 fs->rx_bad_l4_csum = 0;
2097 fs->rx_bad_outer_l4_csum = 0;
2098 fs->rx_bad_outer_ip_csum = 0;
2100 memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
2101 memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2102 fs->core_cycles = 0;
2107 flush_fwd_rx_queues(void)
2109 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2116 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2117 uint64_t timer_period;
2119 if (num_procs > 1) {
2120 printf("multi-process does not support flushing fwd Rx queues; skipping\n");
2124 /* convert to number of cycles */
2125 timer_period = rte_get_timer_hz(); /* 1 second timeout */
2127 for (j = 0; j < 2; j++) {
2128 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2129 for (rxq = 0; rxq < nb_rxq; rxq++) {
2130 port_id = fwd_ports_ids[rxp];
2132 * testpmd can get stuck in the do-while loop below
2133 * if rte_eth_rx_burst() always returns nonzero
2134 * packets. A timer is therefore added to exit this loop
2135 * after the 1-second timer expires.
2137 prev_tsc = rte_rdtsc();
2139 nb_rx = rte_eth_rx_burst(port_id, rxq,
2140 pkts_burst, MAX_PKT_BURST);
2141 for (i = 0; i < nb_rx; i++)
2142 rte_pktmbuf_free(pkts_burst[i]);
2144 cur_tsc = rte_rdtsc();
2145 diff_tsc = cur_tsc - prev_tsc;
2146 timer_tsc += diff_tsc;
2147 } while ((nb_rx > 0) &&
2148 (timer_tsc < timer_period));
2152 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2157 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2159 struct fwd_stream **fsm;
2162 #ifdef RTE_LIB_BITRATESTATS
2163 uint64_t tics_per_1sec;
2164 uint64_t tics_datum;
2165 uint64_t tics_current;
2166 uint16_t i, cnt_ports;
2168 cnt_ports = nb_ports;
2169 tics_datum = rte_rdtsc();
2170 tics_per_1sec = rte_get_timer_hz();
2172 fsm = &fwd_streams[fc->stream_idx];
2173 nb_fs = fc->stream_nb;
2175 for (sm_id = 0; sm_id < nb_fs; sm_id++)
2176 (*pkt_fwd)(fsm[sm_id]);
2177 #ifdef RTE_LIB_BITRATESTATS
2178 if (bitrate_enabled != 0 &&
2179 bitrate_lcore_id == rte_lcore_id()) {
2180 tics_current = rte_rdtsc();
2181 if (tics_current - tics_datum >= tics_per_1sec) {
2182 /* Periodic bitrate calculation */
2183 for (i = 0; i < cnt_ports; i++)
2184 rte_stats_bitrate_calc(bitrate_data,
2186 tics_datum = tics_current;
2190 #ifdef RTE_LIB_LATENCYSTATS
2191 if (latencystats_enabled != 0 &&
2192 latencystats_lcore_id == rte_lcore_id())
2193 rte_latencystats_update();
2196 } while (! fc->stopped);
2200 start_pkt_forward_on_core(void *fwd_arg)
2202 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2203 cur_fwd_config.fwd_eng->packet_fwd);
2208 * Run the TXONLY packet forwarding engine to send a single burst of packets.
2209 * Used to start communication flows in network loopback test configurations.
2212 run_one_txonly_burst_on_core(void *fwd_arg)
2214 struct fwd_lcore *fwd_lc;
2215 struct fwd_lcore tmp_lcore;
2217 fwd_lc = (struct fwd_lcore *) fwd_arg;
2218 tmp_lcore = *fwd_lc;
2219 tmp_lcore.stopped = 1;
2220 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2225 * Launch packet forwarding:
2226 * - Setup per-port forwarding context.
2227 * - Launch logical cores with their forwarding configuration.
2230 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2236 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2237 lc_id = fwd_lcores_cpuids[i];
2238 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2239 fwd_lcores[i]->stopped = 0;
2240 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2241 fwd_lcores[i], lc_id);
2244 "launch lcore %u failed - diag=%d\n",
2251 * Launch packet forwarding configuration.
2254 start_packet_forwarding(int with_tx_first)
2256 port_fwd_begin_t port_fwd_begin;
2257 port_fwd_end_t port_fwd_end;
2260 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2261 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
2263 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2264 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
2266 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2267 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2268 (!nb_rxq || !nb_txq))
2269 rte_exit(EXIT_FAILURE,
2270 "Either rxq or txq are 0, cannot use %s fwd mode\n",
2271 cur_fwd_eng->fwd_mode_name);
2273 if (all_ports_started() == 0) {
2274 fprintf(stderr, "Not all ports were started\n");
2277 if (test_done == 0) {
2278 fprintf(stderr, "Packet forwarding already started\n");
2284 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2285 if (port_fwd_begin != NULL) {
2286 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2287 if (port_fwd_begin(fwd_ports_ids[i])) {
2289 "Packet forwarding is not ready\n");
2295 if (with_tx_first) {
2296 port_fwd_begin = tx_only_engine.port_fwd_begin;
2297 if (port_fwd_begin != NULL) {
2298 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2299 if (port_fwd_begin(fwd_ports_ids[i])) {
2301 "Packet forwarding is not ready\n");
2311 flush_fwd_rx_queues();
2313 pkt_fwd_config_display(&cur_fwd_config);
2314 rxtx_config_display();
2317 if (with_tx_first) {
2318 while (with_tx_first--) {
2319 launch_packet_forwarding(
2320 run_one_txonly_burst_on_core);
2321 rte_eal_mp_wait_lcore();
2323 port_fwd_end = tx_only_engine.port_fwd_end;
2324 if (port_fwd_end != NULL) {
2325 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2326 (*port_fwd_end)(fwd_ports_ids[i]);
2329 launch_packet_forwarding(start_pkt_forward_on_core);
2333 stop_packet_forwarding(void)
2335 port_fwd_end_t port_fwd_end;
2341 fprintf(stderr, "Packet forwarding not started\n");
2344 printf("Telling cores to stop...");
2345 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2346 fwd_lcores[lc_id]->stopped = 1;
2347 printf("\nWaiting for lcores to finish...\n");
2348 rte_eal_mp_wait_lcore();
2349 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2350 if (port_fwd_end != NULL) {
2351 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2352 pt_id = fwd_ports_ids[i];
2353 (*port_fwd_end)(pt_id);
2357 fwd_stats_display();
2359 printf("\nDone.\n");
2364 dev_set_link_up(portid_t pid)
2366 if (rte_eth_dev_set_link_up(pid) < 0)
2367 fprintf(stderr, "\nSet link up failed.\n");
2371 dev_set_link_down(portid_t pid)
2373 if (rte_eth_dev_set_link_down(pid) < 0)
2374 fprintf(stderr, "\nSet link down failed.\n");
2378 all_ports_started(void)
2381 struct rte_port *port;
2383 RTE_ETH_FOREACH_DEV(pi) {
2385 /* Check if there is a port which is not started */
2386 if ((port->port_status != RTE_PORT_STARTED) &&
2387 (port->slave_flag == 0))
2391 /* No port is left unstarted */
2396 port_is_stopped(portid_t port_id)
2398 struct rte_port *port = &ports[port_id];
2400 if ((port->port_status != RTE_PORT_STOPPED) &&
2401 (port->slave_flag == 0))
2407 all_ports_stopped(void)
2411 RTE_ETH_FOREACH_DEV(pi) {
2412 if (!port_is_stopped(pi))
2420 port_is_started(portid_t port_id)
2422 if (port_id_is_invalid(port_id, ENABLED_WARN))
2425 if (ports[port_id].port_status != RTE_PORT_STARTED)
2431 /* Configure the Rx and Tx hairpin queues for the selected port. */
2433 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2436 struct rte_eth_hairpin_conf hairpin_conf = {
2441 struct rte_port *port = &ports[pi];
2442 uint16_t peer_rx_port = pi;
2443 uint16_t peer_tx_port = pi;
2444 uint32_t manual = 1;
2445 uint32_t tx_exp = hairpin_mode & 0x10;
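	/*
	 * Note added for clarity (inferred from the checks below, not from
	 * original documentation): hairpin_mode is treated as a bitmask where
	 * bit 0x1 chains the ports in a loop, bit 0x2 binds them in pairs,
	 * any bit within 0xf implies manual binding, and bit 0x10 requests
	 * explicit Tx flow rules. With no low bits set, a port hairpins to
	 * itself.
	 */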
2447 if (!(hairpin_mode & 0xf)) {
2451 } else if (hairpin_mode & 0x1) {
2452 peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2453 RTE_ETH_DEV_NO_OWNER);
2454 if (peer_tx_port >= RTE_MAX_ETHPORTS)
2455 peer_tx_port = rte_eth_find_next_owned_by(0,
2456 RTE_ETH_DEV_NO_OWNER);
2457 if (p_pi != RTE_MAX_ETHPORTS) {
2458 peer_rx_port = p_pi;
2462 /* Last port will be the peer RX port of the first. */
2463 RTE_ETH_FOREACH_DEV(next_pi)
2464 peer_rx_port = next_pi;
2467 } else if (hairpin_mode & 0x2) {
2469 peer_rx_port = p_pi;
2471 peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2472 RTE_ETH_DEV_NO_OWNER);
2473 if (peer_rx_port >= RTE_MAX_ETHPORTS)
2476 peer_tx_port = peer_rx_port;
2480 for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2481 hairpin_conf.peers[0].port = peer_rx_port;
2482 hairpin_conf.peers[0].queue = i + nb_rxq;
2483 hairpin_conf.manual_bind = !!manual;
2484 hairpin_conf.tx_explicit = !!tx_exp;
2485 diag = rte_eth_tx_hairpin_queue_setup
2486 (pi, qi, nb_txd, &hairpin_conf);
2491 /* Fail to setup rx queue, return */
2492 if (rte_atomic16_cmpset(&(port->port_status),
2494 RTE_PORT_STOPPED) == 0)
2496 "Port %d can not be set back to stopped\n", pi);
2497 fprintf(stderr, "Fail to configure port %d hairpin queues\n",
2499 /* try to reconfigure queues next time */
2500 port->need_reconfig_queues = 1;
2503 for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2504 hairpin_conf.peers[0].port = peer_tx_port;
2505 hairpin_conf.peers[0].queue = i + nb_txq;
2506 hairpin_conf.manual_bind = !!manual;
2507 hairpin_conf.tx_explicit = !!tx_exp;
2508 diag = rte_eth_rx_hairpin_queue_setup
2509 (pi, qi, nb_rxd, &hairpin_conf);
2514 /* Fail to setup rx queue, return */
2515 if (rte_atomic16_cmpset(&(port->port_status),
2517 RTE_PORT_STOPPED) == 0)
2519 "Port %d can not be set back to stopped\n", pi);
2520 fprintf(stderr, "Fail to configure port %d hairpin queues\n",
2522 /* try to reconfigure queues next time */
2523 port->need_reconfig_queues = 1;
/* Configure the Rx with optional split. */
int
rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
	       uint16_t nb_rx_desc, unsigned int socket_id,
	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
{
	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
	unsigned int i, mp_n;
	int ret;

	if (rx_pkt_nb_segs <= 1 ||
	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
		rx_conf->rx_seg = NULL;
		rx_conf->rx_nseg = 0;
		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
					     nb_rx_desc, socket_id,
					     rx_conf, mp);
		return ret;
	}
	for (i = 0; i < rx_pkt_nb_segs; i++) {
		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
		struct rte_mempool *mpx;
		/*
		 * Use the last valid pool for the segments whose index
		 * exceeds the number of configured pools.
		 */
		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
		mpx = mbuf_pool_find(socket_id, mp_n);
		/* Handle zero as mbuf data buffer size. */
		rx_seg->length = rx_pkt_seg_lengths[i] ?
				 rx_pkt_seg_lengths[i] :
				 mbuf_data_size[mp_n];
		rx_seg->offset = i < rx_pkt_nb_offs ?
				 rx_pkt_seg_offsets[i] : 0;
		rx_seg->mp = mpx ? mpx : mp;
	}
	rx_conf->rx_nseg = rx_pkt_nb_segs;
	rx_conf->rx_seg = rx_useg;
	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
				     socket_id, rx_conf, NULL);
	rx_conf->rx_seg = NULL;
	rx_conf->rx_nseg = 0;
	return ret;
}
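
/*
 * A minimal sketch of the underlying ethdev buffer-split API used above;
 * it is not part of testpmd, and "small_pool"/"large_pool" plus the queue
 * parameters are hypothetical. It splits each packet into a 64-byte first
 * segment and a second segment holding the remainder:
 *
 *	union rte_eth_rxseg segs[2] = {};
 *	struct rte_eth_rxconf conf = dev_info.default_rxconf;
 *
 *	segs[0].split.mp = small_pool;
 *	segs[0].split.length = 64;
 *	segs[1].split.mp = large_pool;
 *	segs[1].split.length = 0;	// 0: use the pool's buffer size
 *	conf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	conf.rx_seg = segs;
 *	conf.rx_nseg = 2;
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), &conf, NULL);
 */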
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	portid_t p_pi = RTE_MAX_ETHPORTS;
	portid_t pl[RTE_MAX_ETHPORTS];
	portid_t peer_pl[RTE_MAX_ETHPORTS];
	uint16_t cnt_pi = 0;
	uint16_t cfg_pi = 0;
	int peer_pi;
	queueid_t qi;
	struct rte_port *port;
	struct rte_eth_hairpin_cap cap;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			fprintf(stderr, "Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					fprintf(stderr,
						"Failed to apply isolated mode on port %d\n",
						pi);
					return -1;
				}
			}
			configure_rxtx_dump_callbacks(0);
			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			if (nb_hairpinq > 0 &&
			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
				fprintf(stderr,
					"Port %d doesn't support hairpin queues\n",
					pi);
				return -1;
			}

			/* configure port */
			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
						    nb_txq + nb_hairpinq,
						    &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					fprintf(stderr,
						"Port %d cannot be set back to stopped\n",
						pi);
				fprintf(stderr, "Failed to configure port %d\n",
					pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						txring_numa[pi],
						&(port->tx_conf[qi]));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						port->nb_tx_desc[qi],
						port->socket_id,
						&(port->tx_conf[qi]));

				if (diag == 0)
					continue;

				/* Failed to set up Tx queue: revert port state and return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					fprintf(stderr,
						"Port %d cannot be set back to stopped\n",
						pi);
				fprintf(stderr,
					"Failed to configure port %d Tx queues\n",
					pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			for (qi = 0; qi < nb_rxq; qi++) {
				/* setup rx queues */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find
							(rxring_numa[pi], 0);
					if (mp == NULL) {
						fprintf(stderr,
							"Failed to set up Rx queue: no mempool allocated on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     rxring_numa[pi],
					     &(port->rx_conf[qi]),
					     mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find
							(port->socket_id, 0);
					if (mp == NULL) {
						fprintf(stderr,
							"Failed to set up Rx queue: no mempool allocated on socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rx_queue_setup(pi, qi,
					     port->nb_rx_desc[qi],
					     port->socket_id,
					     &(port->rx_conf[qi]),
					     mp);
				}
				if (diag == 0)
					continue;

				/* Failed to set up Rx queue: revert port state and return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					fprintf(stderr,
						"Port %d cannot be set back to stopped\n",
						pi);
				fprintf(stderr,
					"Failed to configure port %d Rx queues\n",
					pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup hairpin queues */
			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
				return -1;
		}
		configure_rxtx_dump_callbacks(verbose_level);
		if (clear_ptypes) {
			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
					NULL, 0);
			if (diag < 0)
				fprintf(stderr,
					"Port %d: Failed to disable Ptype parsing\n",
					pi);
		}

		p_pi = pi;
		cnt_pi++;

		/* start port */
		diag = eth_dev_start_mp(pi);
		if (diag < 0) {
			fprintf(stderr, "Failed to start port %d: %s\n",
				pi, rte_strerror(-diag));

			/* Failed to start the port: revert port state */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				fprintf(stderr,
					"Port %d cannot be set back to stopped\n",
					pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			fprintf(stderr, "Port %d cannot be set to started\n",
				pi);

		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
					RTE_ETHER_ADDR_BYTES(&port->eth_addr));

		/* at least one port started, need checking link status */
		need_check_link_status = 1;

		pl[cfg_pi++] = pi;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		fprintf(stderr, "Please stop the ports first\n");

	if (hairpin_mode & 0xf) {
		uint16_t i;
		int j;

		/* bind all started hairpin ports */
		for (i = 0; i < cfg_pi; i++) {
			pi = pl[i];
			/* bind current Tx to all peer Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 1);
			if (peer_pi < 0)
				return peer_pi;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
				if (diag < 0) {
					fprintf(stderr,
						"Error during binding hairpin Tx port %u to %u: %s\n",
						pi, peer_pl[j],
						rte_strerror(-diag));
					return -1;
				}
			}
			/* bind all peer Tx to current Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 0);
			if (peer_pi < 0)
				return peer_pi;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
				if (diag < 0) {
					fprintf(stderr,
						"Error during binding hairpin Tx port %u to %u: %s\n",
						peer_pl[j], pi,
						rte_strerror(-diag));
					return -1;
				}
			}
		}
	}

	printf("Done\n");
	return 0;
}
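
/*
 * Stop one port or all ports (RTE_PORT_ALL): unbind hairpin peers when
 * hairpin is configured, flush the port's flow rules and move the port
 * back to the stopped state.
 */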
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;
	portid_t peer_pl[RTE_MAX_ETHPORTS];
	int peer_pi;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		if (hairpin_mode & 0xf) {
			int j;

			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
			/* unbind all peer Tx from current Rx */
			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
							RTE_MAX_ETHPORTS, 0);
			if (peer_pi < 0)
				continue;
			for (j = 0; j < peer_pi; j++) {
				if (!port_is_started(peer_pl[j]))
					continue;
				rte_eth_hairpin_unbind(peer_pl[j], pi);
			}
		}

		if (port->flow_list)
			port_flow_flush(pi);

		if (eth_dev_stop_mp(pi) != 0)
			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
				pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			fprintf(stderr, "Port %d cannot be set to stopped\n",
				pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
static void
remove_invalid_ports_in(portid_t *array, portid_t *total)
{
	portid_t i;
	portid_t new_total = 0;

	for (i = 0; i < *total; i++)
		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
			array[new_total] = array[i];
			new_total++;
		}
	*total = new_total;
}

void
remove_invalid_ports(void)
{
	remove_invalid_ports_in(ports_ids, &nb_ports);
	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
	nb_cfg_ports = nb_fwd_ports;
}
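
/*
 * Close one port or all ports (RTE_PORT_ALL): flush its flow rules, close
 * the ethdev and drop the port from the internal port lists.
 */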
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			fprintf(stderr, "Port %d is already closed\n", pi);
			continue;
		}

		if (is_proc_primary()) {
			port_flow_flush(pi);
			rte_eth_dev_close(pi);
		}
	}

	remove_invalid_ports();
	printf("Done\n");
}
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
		fprintf(stderr,
			"Cannot reset port(s), please stop port(s) first.\n");
		return;
	}

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			fprintf(stderr,
				"Please remove port %d from forwarding configuration.\n",
				pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			fprintf(stderr,
				"Please remove port %d from bonded device.\n",
				pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			fprintf(stderr, "Failed to reset port %d. diag=%d\n",
				pi, diag);
		}
	}

	printf("Done\n");
}
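
/*
 * Hot-plug a device given its devargs identifier, e.g. a PCI address such
 * as "0000:03:00.0" or a vdev string such as "net_tap0" (illustrative
 * values), then set up the resulting port(s).
 */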
void
attach_port(char *identifier)
{
	portid_t pi;
	struct rte_dev_iterator iterator;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		fprintf(stderr, "Invalid parameters are specified\n");
		return;
	}

	if (rte_dev_probe(identifier) < 0) {
		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
		return;
	}

	/* first attach mode: event */
	if (setup_on_probe_event) {
		/* new ports are detected on RTE_ETH_EVENT_NEW event */
		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
			if (ports[pi].port_status == RTE_PORT_HANDLING &&
					ports[pi].need_setup != 0)
				setup_attached_port(pi);
		return;
	}

	/* second attach mode: iterator */
	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
		/* setup ports matching the devargs used for probing */
		if (port_is_forwarding(pi))
			continue; /* port was already attached before */
		setup_attached_port(pi);
	}
}
static void
setup_attached_port(portid_t pi)
{
	unsigned int socket_id;
	int ret;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	ret = rte_eth_promiscuous_enable(pi);
	if (ret != 0)
		fprintf(stderr,
			"Error during enabling promiscuous mode for port %u: %s - ignore\n",
			pi, rte_strerror(-ret));

	ports_ids[nb_ports++] = pi;
	fwd_ports_ids[nb_fwd_ports++] = pi;
	nb_cfg_ports = nb_fwd_ports;
	ports[pi].need_setup = 0;
	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
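
/* Detach every port belonging to a device, then remove the device itself. */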
static void
detach_device(struct rte_device *dev)
{
	portid_t sibling;

	if (dev == NULL) {
		fprintf(stderr, "Device already removed\n");
		return;
	}

	printf("Removing a device...\n");

	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
				fprintf(stderr, "Port %u not stopped\n",
					sibling);
				return;
			}
			port_flow_flush(sibling);
		}
	}

	if (rte_dev_remove(dev) < 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
		return;
	}
	remove_invalid_ports();

	printf("Device is detached\n");
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
}
void
detach_port_device(portid_t port_id)
{
	int ret;
	struct rte_eth_dev_info dev_info;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
			fprintf(stderr, "Port not stopped\n");
			return;
		}
		fprintf(stderr, "Port was not closed\n");
	}

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0) {
		TESTPMD_LOG(ERR,
			"Failed to get device info for port %d, not detaching\n",
			port_id);
		return;
	}
	detach_device(dev_info.device);
}
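
/*
 * Remove a device identified by a devargs string rather than by one of its
 * port ids, flushing the flow rules of its ports first.
 */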
void
detach_devargs(char *identifier)
{
	struct rte_dev_iterator iterator;
	struct rte_devargs da;
	portid_t port_id;

	printf("Removing a device...\n");

	memset(&da, 0, sizeof(da));
	if (rte_devargs_parsef(&da, "%s", identifier)) {
		fprintf(stderr, "cannot parse identifier\n");
		return;
	}

	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
				fprintf(stderr, "Port %u not stopped\n",
					port_id);
				rte_eth_iterator_cleanup(&iterator);
				rte_devargs_reset(&da);
				return;
			}
			port_flow_flush(port_id);
		}
	}

	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
			    da.name, da.bus->name);
		rte_devargs_reset(&da);
		return;
	}

	remove_invalid_ports();

	printf("Device %s is detached\n", identifier);
	printf("Now total ports is %d\n", nb_ports);
	printf("Done\n");
	rte_devargs_reset(&da);
}
void
pmd_test_exit(void)
{
	portid_t pt_id;
	unsigned int i;
	int ret;

	if (test_done == 0)
		stop_packet_forwarding();

#ifndef RTE_EXEC_ENV_WINDOWS
	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
		if (mempools[i]) {
			if (mp_alloc_type == MP_ALLOC_ANON)
				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
						     NULL);
		}
	}
#endif
	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nStopping port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
		}
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			close_port(pt_id);
		}
	}

	if (hot_plug) {
		ret = rte_dev_event_monitor_stop();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"Failed to stop device event monitor.\n");
			return;
		}

		ret = rte_dev_event_callback_unregister(NULL,
			dev_event_callback, NULL);
		if (ret < 0) {
			RTE_LOG(ERR, EAL,
				"Failed to unregister device event callback.\n");
			return;
		}

		ret = rte_dev_hotplug_handle_disable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"Failed to disable hotplug handling.\n");
			return;
		}
	}
	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
		if (mempools[i])
			mempool_free_mp(mempools[i]);
	}

	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					fprintf(stderr,
						"Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status,
					sizeof(link_status), &link);
				printf("Port %d %s\n", portid, link_status);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
static void
rmv_port_callback(void *arg)
{
	int need_to_start = 0;
	int org_no_link_check = no_link_check;
	portid_t port_id = (intptr_t)arg;
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);

	if (!test_done && port_is_forwarding(port_id)) {
		need_to_start = 1;
		stop_packet_forwarding();
	}
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		TESTPMD_LOG(ERR,
			"Failed to get device info for port %d, not detaching\n",
			port_id);
	else {
		struct rte_device *device = dev_info.device;

		close_port(port_id);
		detach_device(device); /* might be already removed or have more ports */
	}
	if (need_to_start)
		start_packet_forwarding(0);
}
/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		   void *ret_param)
{
	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr,
			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
			eth_event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_NEW:
		ports[port_id].need_setup = 1;
		ports[port_id].port_status = RTE_PORT_HANDLING;
		break;
	case RTE_ETH_EVENT_INTR_RMV:
		if (port_id_is_invalid(port_id, DISABLED_WARN))
			break;
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			fprintf(stderr,
				"Could not set up deferred device removal\n");
		break;
	case RTE_ETH_EVENT_DESTROY:
		ports[port_id].port_status = RTE_PORT_CLOSED;
		printf("Port %u is closed\n", port_id);
		break;
	default:
		break;
	}
	return 0;
}
static int
register_eth_event_callback(void)
{
	int ret;
	enum rte_eth_event_type event;

	for (event = RTE_ETH_EVENT_UNKNOWN;
			event < RTE_ETH_EVENT_MAX; event++) {
		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
				event,
				eth_event_callback,
				NULL);
		if (ret != 0) {
			TESTPMD_LOG(ERR, "Failed to register callback for "
					"%s event\n", eth_event_desc[event]);
			return -1;
		}
	}

	return 0;
}
/* This function is used by the interrupt thread */
static void
dev_event_callback(const char *device_name, enum rte_dev_event_type type,
		   __rte_unused void *arg)
{
	uint16_t port_id;
	int ret;

	if (type >= RTE_DEV_EVENT_MAX) {
		fprintf(stderr, "%s called upon invalid event %d\n",
			__func__, type);
		fflush(stderr);
	}

	switch (type) {
	case RTE_DEV_EVENT_REMOVE:
		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
			device_name);
		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
		if (ret) {
			RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
				device_name);
			return;
		}
		/*
		 * The user's callback is invoked from the EAL interrupt
		 * callback, and an interrupt callback must finish before it
		 * can be unregistered while the device is being detached. So
		 * return from this callback quickly and detach the device
		 * through a deferred removal alarm instead. This is a
		 * workaround: once device detaching is moved into the EAL,
		 * the deferred removal can be dropped.
		 */
		if (rte_eal_alarm_set(100000,
				rmv_port_callback, (void *)(intptr_t)port_id))
			RTE_LOG(ERR, EAL,
				"Could not set up deferred device removal\n");
		break;
	case RTE_DEV_EVENT_ADD:
		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
			device_name);
		/* TODO: attach the port once kernel driver binding
		 * is finished.
		 */
		break;
	default:
		break;
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	uint16_t qid;
	uint64_t offloads;

	for (qid = 0; qid < nb_rxq; qid++) {
		offloads = port->rx_conf[qid].offloads;
		port->rx_conf[qid] = port->dev_info.default_rxconf;
		if (offloads != 0)
			port->rx_conf[qid].offloads = offloads;

		/* Check if any Rx parameters have been passed */
		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;

		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;

		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;

		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;

		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
			port->rx_conf[qid].rx_drop_en = rx_drop_en;

		port->nb_rx_desc[qid] = nb_rxd;
	}

	for (qid = 0; qid < nb_txq; qid++) {
		offloads = port->tx_conf[qid].offloads;
		port->tx_conf[qid] = port->dev_info.default_txconf;
		if (offloads != 0)
			port->tx_conf[qid].offloads = offloads;

		/* Check if any Tx parameters have been passed */
		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;

		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;

		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;

		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;

		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;

		port->nb_tx_desc[qid] = nb_txd;
	}
}
/*
 * Helper function to align the max_rx_pkt_len value with the JUMBO_FRAME
 * offload; the MTU is also aligned if the JUMBO_FRAME offload is not set.
 *
 * port->dev_info must be set before calling this function.
 *
 * Returns 0 on success, negative on error.
 */
int
update_jumbo_frame_offload(portid_t portid)
{
	struct rte_port *port = &ports[portid];
	uint32_t eth_overhead;
	uint64_t rx_offloads;
	int ret;
	bool on;

	/* Update the max_rx_pkt_len to have MTU as RTE_ETHER_MTU */
	if (port->dev_info.max_mtu != UINT16_MAX &&
	    port->dev_info.max_rx_pktlen > port->dev_info.max_mtu)
		eth_overhead = port->dev_info.max_rx_pktlen -
				port->dev_info.max_mtu;
	else
		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	rx_offloads = port->dev_conf.rxmode.offloads;

	/* Default config value is 0 to use PMD specific overhead */
	if (port->dev_conf.rxmode.max_rx_pkt_len == 0)
		port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead;

	if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) {
		rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
		on = false;
	} else {
		if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
			fprintf(stderr,
				"Frame size (%u) is not supported by port %u\n",
				port->dev_conf.rxmode.max_rx_pkt_len,
				portid);
			return -1;
		}
		rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
		on = true;
	}

	if (rx_offloads != port->dev_conf.rxmode.offloads) {
		uint16_t qid;

		port->dev_conf.rxmode.offloads = rx_offloads;

		/* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
		for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
			if (on)
				port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
			else
				port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
		}
	}

	/* If JUMBO_FRAME is set, the MTU conversion is done by the ethdev
	 * layer; if unset, do it here.
	 */
	if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
		ret = eth_dev_set_mtu_mp(portid,
				port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead);
		if (ret)
			fprintf(stderr,
				"Failed to set MTU to %u for port %u\n",
				port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead,
				portid);
	}

	return 0;
}
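
/*
 * Worked example (illustrative): with the default Ethernet overhead of
 * RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes, a
 * max_rx_pkt_len of 1518 corresponds to an MTU of 1500 (RTE_ETHER_MTU),
 * so JUMBO_FRAME stays disabled; any larger frame size enables it.
 */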
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;
	int ret;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.fdir_conf = fdir_conf;

		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
		if (ret != 0)
			return;

		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					(enum rte_eth_rx_mq_mode)
						(rx_mq_mode & ETH_MQ_RX_RSS);
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
		if (ret != 0)
			return;

#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;
	struct rte_eth_dev_info dev_info;
	int ret;

	port = &ports[slave_pid];
	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
	if (ret != 0) {
		TESTPMD_LOG(ERR,
			"Failed to get device info for port id %d, "
			"cannot determine if the port is a bonded slave\n",
			slave_pid);
		return 0;
	}

	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
		return 1;
	return 0;
}
const uint16_t vlan_tags[] = {
	0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
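
/*
 * Fill *eth_conf for DCB operation: either DCB combined with VMDq
 * (DCB_VT_ENABLED), mapping the VLAN tags above to pools, or plain DCB
 * with RSS, for the requested number of traffic classes.
 */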
static int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Build up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode =
				(enum rte_eth_rx_mq_mode)
					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	if (num_procs > 1) {
		printf("The multi-process feature doesn't support DCB.\n");
		return -ENOTSUP;
	}
	rte_port = &ports[pid];

	/* retain the original device configuration. */
	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;

	/* re-configure the device. */
	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
	if (retval < 0)
		return retval;

	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
	if (retval != 0)
		return retval;

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue ids of the VMDq pools start after the PF queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		fprintf(stderr,
			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
			pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same DCB capability
	 * and the same number of rxq and txq in DCB mode.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if VT is disabled, use all PF queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
	if (retval != 0)
		return retval;

	rte_port->dcb_flag = 1;

	/* Enter DCB configuration status */
	dcb_config = 1;

	return 0;
}
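
/* Allocate and initialize the global port array and per-port NUMA tables. */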
static void
init_port(void)
{
	int i;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		LIST_INIT(&ports[i].flow_tunnel_list);
	/* Initialize ports NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}
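
/* Print per-port statistics; driven by the --stats-period loop in main(). */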
static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);

	fflush(stdout);
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
			signum);
#ifdef RTE_LIB_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIB_LATENCYSTATS
		if (latencystats_enabled != 0)
			rte_latencystats_uninit();
#endif
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
#ifndef RTE_EXEC_ENV_WINDOWS
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
#endif
	}
}
int
main(int argc, char **argv)
{
	int diag;
	portid_t port_id;
	uint16_t count;
	int ret;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	testpmd_logtype = rte_log_register("testpmd");
	if (testpmd_logtype < 0)
		rte_exit(EXIT_FAILURE, "Cannot register log type");
	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
			 rte_strerror(rte_errno));

	ret = register_eth_event_callback();
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");

#ifdef RTE_LIB_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init();
#endif

	count = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		ports_ids[count] = port_id;
		count++;
	}
	nb_ports = (portid_t) count;
	if (nb_ports == 0)
		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
			 "Check the core mask argument\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIB_BITRATESTATS
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIB_LATENCYSTATS
	latencystats_enabled = 0;
#endif

	/* on FreeBSD, mlockall() is disabled by default */
#ifdef RTE_EXEC_ENV_FREEBSD
	do_mlockall = 0;
#else
	do_mlockall = 1;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

#ifndef RTE_EXEC_ENV_WINDOWS
	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}
#endif

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		fprintf(stderr,
			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		fprintf(stderr,
			"Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		fprintf(stderr,
			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent fully testing it.\n",
			nb_rxq, nb_txq);

	init_config();

	if (hot_plug) {
		ret = rte_dev_hotplug_handle_enable();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"Failed to enable hotplug handling.\n");
			return -1;
		}

		ret = rte_dev_event_monitor_start();
		if (ret) {
			RTE_LOG(ERR, EAL,
				"Failed to start device event monitoring.\n");
			return -1;
		}

		ret = rte_dev_event_callback_register(NULL,
			dev_event_callback, NULL);
		if (ret) {
			RTE_LOG(ERR, EAL,
				"Failed to register device event callback\n");
			return -1;
		}
	}

	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			fprintf(stderr,
				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
				port_id, rte_strerror(-ret));
	}

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIB_LATENCYSTATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			fprintf(stderr,
				"Warning: latencystats init() returned error %d\n",
				ret);
		fprintf(stderr, "Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIB_BITRATESTATS
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIB_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				rte_delay_us_sleep(US_PER_S);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	ret = rte_eal_cleanup();
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			 "EAL cleanup failed: %s\n", strerror(-ret));

	return EXIT_SUCCESS;
}
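
/*
 * Example invocation (illustrative only):
 *
 *	dpdk-testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
 *
 * runs testpmd on lcores 0-3 and opens the interactive prompt with two Rx
 * and two Tx queues per port.
 */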