1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
17 #include <sys/queue.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_byteorder.h>
28 #include <rte_debug.h>
29 #include <rte_cycles.h>
30 #include <rte_malloc_heap.h>
31 #include <rte_memory.h>
32 #include <rte_memcpy.h>
33 #include <rte_launch.h>
35 #include <rte_alarm.h>
36 #include <rte_per_lcore.h>
37 #include <rte_lcore.h>
38 #include <rte_atomic.h>
39 #include <rte_branch_prediction.h>
40 #include <rte_mempool.h>
41 #include <rte_malloc.h>
43 #include <rte_mbuf_pool_ops.h>
44 #include <rte_interrupts.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
49 #include <rte_string_fns.h>
50 #ifdef RTE_LIBRTE_IXGBE_PMD
51 #include <rte_pmd_ixgbe.h>
53 #ifdef RTE_LIBRTE_PDUMP
54 #include <rte_pdump.h>
57 #include <rte_metrics.h>
58 #ifdef RTE_LIBRTE_BITRATE
59 #include <rte_bitrate.h>
61 #ifdef RTE_LIBRTE_LATENCY_STATS
62 #include <rte_latencystats.h>
68 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
69 #define HUGE_FLAG (0x40000)
71 #define HUGE_FLAG MAP_HUGETLB
74 #ifndef MAP_HUGE_SHIFT
75 /* older kernels (or FreeBSD) will not have this define */
76 #define HUGE_SHIFT (26)
78 #define HUGE_SHIFT MAP_HUGE_SHIFT
81 #define EXTMEM_HEAP_NAME "extmem"
83 uint16_t verbose_level = 0; /**< Silent by default. */
84 int testpmd_logtype; /**< Log type for testpmd logs */
86 /* use master core for command line ? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
90 char cmdline_filename[PATH_MAX] = {0};
93 * NUMA support configuration.
94 * When set, the NUMA support attempts to dispatch the allocation of the
95 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
96 * probed ports among the CPU sockets 0 and 1.
97 * Otherwise, all memory is allocated from CPU socket 0.
99 uint8_t numa_support = 1; /**< numa enabled by default */
102 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
105 uint8_t socket_num = UMA_NO_CONFIG;
108 * Select mempool allocation type:
109 * - native: use regular DPDK memory
110 * - anon: use regular DPDK memory to create mempool, but populate using
111 * anonymous memory (may not be IOVA-contiguous)
112 * - xmem: use externally allocated hugepage memory
114 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
117 * Store specified sockets on which memory pool to be used by ports
120 uint8_t port_numa[RTE_MAX_ETHPORTS];
123 * Store specified sockets on which RX ring to be used by ports
126 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
129 * Store specified sockets on which TX ring to be used by ports
132 uint8_t txring_numa[RTE_MAX_ETHPORTS];
135 * Record the Ethernet address of peer target ports to which packets are
137 * Must be instantiated with the ethernet addresses of peer traffic generator
140 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
141 portid_t nb_peer_eth_addrs = 0;
144 * Probed Target Environment.
146 struct rte_port *ports; /**< For all probed ethernet ports. */
147 portid_t nb_ports; /**< Number of probed ethernet ports. */
148 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
149 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
151 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
154 * Test Forwarding Configuration.
155 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
156 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
158 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
159 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
160 portid_t nb_cfg_ports; /**< Number of configured ports. */
161 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
163 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
164 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
166 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
167 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
170 * Forwarding engines.
172 struct fwd_engine * fwd_engines[] = {
182 #if defined RTE_LIBRTE_PMD_SOFTNIC
185 #ifdef RTE_LIBRTE_IEEE1588
186 &ieee1588_fwd_engine,
191 struct fwd_config cur_fwd_config;
192 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
193 uint32_t retry_enabled;
194 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
195 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
197 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
198 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
199 * specified on command-line. */
200 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
203 * In a container, the process running with the 'stats-period' option
204 * cannot be terminated. Set a flag to exit the stats period loop after receiving SIGINT/SIGTERM.
209 * Configuration of packet segments used by the "txonly" processing engine.
211 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
212 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
213 TXONLY_DEF_PACKET_LEN,
215 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
217 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
218 /**< Split policy for packets to TX. */
220 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
221 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
223 /* Whether the current configuration is in DCB mode; 0 means it is not in DCB mode */
224 uint8_t dcb_config = 0;
226 /* Whether the dcb is in testing status */
227 uint8_t dcb_test = 0;
230 * Configurable number of RX/TX queues.
232 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
233 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
236 * Configurable number of RX/TX ring descriptors.
237 * Defaults are supplied by drivers via ethdev.
239 #define RTE_TEST_RX_DESC_DEFAULT 0
240 #define RTE_TEST_TX_DESC_DEFAULT 0
241 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
242 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
244 #define RTE_PMD_PARAM_UNSET -1
246 * Configurable values of RX and TX ring threshold registers.
249 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
250 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
251 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
253 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
254 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
255 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
258 * Configurable value of RX free threshold.
260 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
263 * Configurable value of RX drop enable.
265 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
268 * Configurable value of TX free threshold.
270 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
273 * Configurable value of TX RS bit threshold.
275 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
278 * Configurable value of buffered packets before sending.
280 uint16_t noisy_tx_sw_bufsz;
283 * Configurable value of packet buffer timeout.
285 uint16_t noisy_tx_sw_buf_flush_time;
288 * Configurable value for size of VNF internal memory area
289 * used for simulating noisy neighbour behaviour
291 uint64_t noisy_lkup_mem_sz;
294 * Configurable value of number of random writes done in
295 * VNF simulation memory area.
297 uint64_t noisy_lkup_num_writes;
300 * Configurable value of number of random reads done in
301 * VNF simulation memory area.
303 uint64_t noisy_lkup_num_reads;
306 * Configurable value of number of random reads/writes done in
307 * VNF simulation memory area.
309 uint64_t noisy_lkup_num_reads_writes;
312 * Receive Side Scaling (RSS) configuration.
314 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
317 * Port topology configuration
319 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
322 * Avoid flushing all the RX streams before starting forwarding.
324 uint8_t no_flush_rx = 0; /* flush by default */
327 * Flow API isolated mode.
329 uint8_t flow_isolate_all;
332 * Avoid checking the link status when starting/stopping a port.
334 uint8_t no_link_check = 0; /* check by default */
337 * Enable link status change notification
339 uint8_t lsc_interrupt = 1; /* enabled by default */
342 * Enable device removal notification.
344 uint8_t rmv_interrupt = 1; /* enabled by default */
346 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
348 /* Pretty printing of ethdev events */
349 static const char * const eth_event_desc[] = {
350 [RTE_ETH_EVENT_UNKNOWN] = "unknown",
351 [RTE_ETH_EVENT_INTR_LSC] = "link state change",
352 [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
353 [RTE_ETH_EVENT_INTR_RESET] = "reset",
354 [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
355 [RTE_ETH_EVENT_IPSEC] = "IPsec",
356 [RTE_ETH_EVENT_MACSEC] = "MACsec",
357 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
358 [RTE_ETH_EVENT_NEW] = "device probed",
359 [RTE_ETH_EVENT_DESTROY] = "device released",
360 [RTE_ETH_EVENT_MAX] = NULL,
364 * Display or mask ether events
365 * Default to all events except VF_MBOX
367 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
368 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
369 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
370 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
371 (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
372 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
373 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
375 * Decide if all memory are locked for performance.
380 * NIC bypass mode configuration options.
383 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
384 /* The NIC bypass watchdog timeout. */
385 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
389 #ifdef RTE_LIBRTE_LATENCY_STATS
392 * Set when latency stats is enabled in the commandline
394 uint8_t latencystats_enabled;
397 * Lcore ID to serve latency statistics.
399 lcoreid_t latencystats_lcore_id = -1;
404 * Ethernet device configuration.
406 struct rte_eth_rxmode rx_mode = {
407 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
410 struct rte_eth_txmode tx_mode = {
411 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
414 struct rte_fdir_conf fdir_conf = {
415 .mode = RTE_FDIR_MODE_NONE,
416 .pballoc = RTE_FDIR_PBALLOC_64K,
417 .status = RTE_FDIR_REPORT_STATUS,
419 .vlan_tci_mask = 0xFFEF,
421 .src_ip = 0xFFFFFFFF,
422 .dst_ip = 0xFFFFFFFF,
425 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
426 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
428 .src_port_mask = 0xFFFF,
429 .dst_port_mask = 0xFFFF,
430 .mac_addr_byte_mask = 0xFF,
431 .tunnel_type_mask = 1,
432 .tunnel_id_mask = 0xFFFFFFFF,
437 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
439 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
440 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
442 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
443 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
445 uint16_t nb_tx_queue_stats_mappings = 0;
446 uint16_t nb_rx_queue_stats_mappings = 0;
449 * Display zero values by default for xstats
451 uint8_t xstats_hide_zero;
453 unsigned int num_sockets = 0;
454 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
456 #ifdef RTE_LIBRTE_BITRATE
457 /* Bitrate statistics */
458 struct rte_stats_bitrates *bitrate_data;
459 lcoreid_t bitrate_lcore_id;
460 uint8_t bitrate_enabled;
463 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
464 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
466 struct vxlan_encap_conf vxlan_encap_conf = {
469 .vni = "\x00\x00\x00",
471 .udp_dst = RTE_BE16(4789),
472 .ipv4_src = IPv4(127, 0, 0, 1),
473 .ipv4_dst = IPv4(255, 255, 255, 255),
474 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
475 "\x00\x00\x00\x00\x00\x00\x00\x01",
476 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
477 "\x00\x00\x00\x00\x00\x00\x11\x11",
479 .eth_src = "\x00\x00\x00\x00\x00\x00",
480 .eth_dst = "\xff\xff\xff\xff\xff\xff",
483 struct nvgre_encap_conf nvgre_encap_conf = {
486 .tni = "\x00\x00\x00",
487 .ipv4_src = IPv4(127, 0, 0, 1),
488 .ipv4_dst = IPv4(255, 255, 255, 255),
489 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
490 "\x00\x00\x00\x00\x00\x00\x00\x01",
491 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
492 "\x00\x00\x00\x00\x00\x00\x11\x11",
494 .eth_src = "\x00\x00\x00\x00\x00\x00",
495 .eth_dst = "\xff\xff\xff\xff\xff\xff",
498 /* Forward function declarations */
499 static void setup_attached_port(portid_t pi);
500 static void map_port_queue_stats_mapping_registers(portid_t pi,
501 struct rte_port *port);
502 static void check_all_ports_link_status(uint32_t port_mask);
503 static int eth_event_callback(portid_t port_id,
504 enum rte_eth_event_type type,
505 void *param, void *ret_param);
506 static void eth_dev_event_callback(const char *device_name,
507 enum rte_dev_event_type type,
511 * Check if all the ports are started.
512 * If yes, return positive value. If not, return zero.
514 static int all_ports_started(void);
516 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
517 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
520 * Helper function to check if socket is already discovered.
521 * If yes, return positive value. If not, return zero.
524 new_socket_id(unsigned int socket_id)
528 for (i = 0; i < num_sockets; i++) {
529 if (socket_ids[i] == socket_id)
536 * Setup default configuration.
539 set_default_fwd_lcores_config(void)
543 unsigned int sock_num;
546 for (i = 0; i < RTE_MAX_LCORE; i++) {
547 if (!rte_lcore_is_enabled(i))
549 sock_num = rte_lcore_to_socket_id(i);
550 if (new_socket_id(sock_num)) {
551 if (num_sockets >= RTE_MAX_NUMA_NODES) {
552 rte_exit(EXIT_FAILURE,
553 "Total sockets greater than %u\n",
556 socket_ids[num_sockets++] = sock_num;
558 if (i == rte_get_master_lcore())
560 fwd_lcores_cpuids[nb_lc++] = i;
562 nb_lcores = (lcoreid_t) nb_lc;
563 nb_cfg_lcores = nb_lcores;
568 set_def_peer_eth_addrs(void)
572 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
573 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
574 peer_eth_addrs[i].addr_bytes[5] = i;
579 set_default_fwd_ports_config(void)
584 RTE_ETH_FOREACH_DEV(pt_id) {
585 fwd_ports_ids[i++] = pt_id;
587 /* Update sockets info according to the attached device */
588 int socket_id = rte_eth_dev_socket_id(pt_id);
589 if (socket_id >= 0 && new_socket_id(socket_id)) {
590 if (num_sockets >= RTE_MAX_NUMA_NODES) {
591 rte_exit(EXIT_FAILURE,
592 "Total sockets greater than %u\n",
595 socket_ids[num_sockets++] = socket_id;
599 nb_cfg_ports = nb_ports;
600 nb_fwd_ports = nb_ports;
604 set_def_fwd_config(void)
606 set_default_fwd_lcores_config();
607 set_def_peer_eth_addrs();
608 set_default_fwd_ports_config();
611 /* extremely pessimistic estimation of memory required to create a mempool */
613 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
615 unsigned int n_pages, mbuf_per_pg, leftover;
616 uint64_t total_mem, mbuf_mem, obj_sz;
618 /* there is no good way to predict how much space the mempool will
619 * occupy because it will allocate chunks on the fly, and some of those
620 * will come from default DPDK memory while some will come from our
621 * external memory, so just assume 128MB will be enough for everyone.
623 uint64_t hdr_mem = 128 << 20;
625 /* account for possible non-contiguousness */
626 obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
628 TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
632 mbuf_per_pg = pgsz / obj_sz;
633 leftover = (nb_mbufs % mbuf_per_pg) > 0;
634 n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
636 mbuf_mem = n_pages * pgsz;
638 total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
640 if (total_mem > SIZE_MAX) {
641 TESTPMD_LOG(ERR, "Memory size too big\n");
644 *out = (size_t)total_mem;
649 static inline uint32_t
652 return (uint32_t)__builtin_ctzll(v);
655 static inline uint32_t
660 v = rte_align64pow2(v);
665 pagesz_flags(uint64_t page_sz)
667 /* as per mmap() manpage, all page sizes are log2 of page size
668 * shifted by MAP_HUGE_SHIFT
670 int log2 = log2_u64(page_sz);
672 return (log2 << HUGE_SHIFT);
676 alloc_mem(size_t memsz, size_t pgsz, bool huge)
681 /* allocate anonymous hugepages */
682 flags = MAP_ANONYMOUS | MAP_PRIVATE;
684 flags |= HUGE_FLAG | pagesz_flags(pgsz);
686 addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
687 if (addr == MAP_FAILED)
693 struct extmem_param {
697 rte_iova_t *iova_table;
698 unsigned int iova_table_len;
702 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
705 uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
706 RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
707 unsigned int cur_page, n_pages, pgsz_idx;
708 size_t mem_sz, cur_pgsz;
709 rte_iova_t *iovas = NULL;
713 for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
714 /* skip anything that is too big */
715 if (pgsizes[pgsz_idx] > SIZE_MAX)
718 cur_pgsz = pgsizes[pgsz_idx];
720 /* if we were told not to allocate hugepages, override */
722 cur_pgsz = sysconf(_SC_PAGESIZE);
724 ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
726 TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
730 /* allocate our memory */
731 addr = alloc_mem(mem_sz, cur_pgsz, huge);
733 /* if we couldn't allocate memory with a specified page size,
734 * that doesn't mean we can't do it with other page sizes, so
740 /* store IOVA addresses for every page in this memory area */
741 n_pages = mem_sz / cur_pgsz;
743 iovas = malloc(sizeof(*iovas) * n_pages);
746 TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
749 /* lock memory if it's not huge pages */
753 /* populate IOVA addresses */
754 for (cur_page = 0; cur_page < n_pages; cur_page++) {
759 offset = cur_pgsz * cur_page;
760 cur = RTE_PTR_ADD(addr, offset);
762 /* touch the page before getting its IOVA */
763 *(volatile char *)cur = 0;
765 iova = rte_mem_virt2iova(cur);
767 iovas[cur_page] = iova;
772 /* if we couldn't allocate anything */
778 param->pgsz = cur_pgsz;
779 param->iova_table = iovas;
780 param->iova_table_len = n_pages;
787 munmap(addr, mem_sz);
793 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
795 struct extmem_param param;
798 memset(¶m, 0, sizeof(param));
800 /* check if our heap exists */
801 socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
803 /* create our heap */
804 ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
806 TESTPMD_LOG(ERR, "Cannot create heap\n");
811 ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge);
813 TESTPMD_LOG(ERR, "Cannot create memory area\n");
817 /* we now have a valid memory area, so add it to heap */
818 ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
819 param.addr, param.len, param.iova_table,
820 param.iova_table_len, param.pgsz);
822 /* when using VFIO, memory is automatically mapped for DMA by EAL */
824 /* not needed any more */
825 free(param.iova_table);
828 TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
829 munmap(param.addr, param.len);
835 TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
842 * Configuration initialisation done once at init time.
845 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
846 unsigned int socket_id)
848 char pool_name[RTE_MEMPOOL_NAMESIZE];
849 struct rte_mempool *rte_mp = NULL;
852 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
853 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
856 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
857 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
859 switch (mp_alloc_type) {
860 case MP_ALLOC_NATIVE:
862 /* wrapper to rte_mempool_create() */
863 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
864 rte_mbuf_best_mempool_ops());
865 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
866 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
871 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
872 mb_size, (unsigned int) mb_mempool_cache,
873 sizeof(struct rte_pktmbuf_pool_private),
878 if (rte_mempool_populate_anon(rte_mp) == 0) {
879 rte_mempool_free(rte_mp);
883 rte_pktmbuf_pool_init(rte_mp, NULL);
884 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
888 case MP_ALLOC_XMEM_HUGE:
891 bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
893 if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
894 rte_exit(EXIT_FAILURE, "Could not create external memory\n");
897 rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
899 rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
901 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
902 rte_mbuf_best_mempool_ops());
903 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
904 mb_mempool_cache, 0, mbuf_seg_size,
910 rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
915 if (rte_mp == NULL) {
916 rte_exit(EXIT_FAILURE,
917 "Creation of mbuf pool for socket %u failed: %s\n",
918 socket_id, rte_strerror(rte_errno));
919 } else if (verbose_level > 0) {
920 rte_mempool_dump(stdout, rte_mp);
925 * Check given socket id is valid or not with NUMA mode,
926 * if valid, return 0, else return -1
929 check_socket_id(const unsigned int socket_id)
931 static int warning_once = 0;
933 if (new_socket_id(socket_id)) {
934 if (!warning_once && numa_support)
935 printf("Warning: NUMA should be configured manually by"
936 " using --port-numa-config and"
937 " --ring-numa-config parameters along with"
946 * Get the allowed maximum number of RX queues.
947 * *pid return the port id which has minimal value of
948 * max_rx_queues in all ports.
951 get_allowed_max_nb_rxq(portid_t *pid)
953 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
955 struct rte_eth_dev_info dev_info;
957 RTE_ETH_FOREACH_DEV(pi) {
958 rte_eth_dev_info_get(pi, &dev_info);
959 if (dev_info.max_rx_queues < allowed_max_rxq) {
960 allowed_max_rxq = dev_info.max_rx_queues;
964 return allowed_max_rxq;
968 * Check input rxq is valid or not.
969 * If input rxq is not greater than any of maximum number
970 * of RX queues of all ports, it is valid.
971 * if valid, return 0, else return -1
974 check_nb_rxq(queueid_t rxq)
976 queueid_t allowed_max_rxq;
979 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
980 if (rxq > allowed_max_rxq) {
981 printf("Fail: input rxq (%u) can't be greater "
982 "than max_rx_queues (%u) of port %u\n",
992 * Get the allowed maximum number of TX queues.
993 * *pid return the port id which has minimal value of
994 * max_tx_queues in all ports.
997 get_allowed_max_nb_txq(portid_t *pid)
999 queueid_t allowed_max_txq = MAX_QUEUE_ID;
1001 struct rte_eth_dev_info dev_info;
1003 RTE_ETH_FOREACH_DEV(pi) {
1004 rte_eth_dev_info_get(pi, &dev_info);
1005 if (dev_info.max_tx_queues < allowed_max_txq) {
1006 allowed_max_txq = dev_info.max_tx_queues;
1010 return allowed_max_txq;
1014 * Check input txq is valid or not.
1015 * If input txq is not greater than any of maximum number
1016 * of TX queues of all ports, it is valid.
1017 * if valid, return 0, else return -1
1020 check_nb_txq(queueid_t txq)
1022 queueid_t allowed_max_txq;
1025 allowed_max_txq = get_allowed_max_nb_txq(&pid);
1026 if (txq > allowed_max_txq) {
1027 printf("Fail: input txq (%u) can't be greater "
1028 "than max_tx_queues (%u) of port %u\n",
1041 struct rte_port *port;
1042 struct rte_mempool *mbp;
1043 unsigned int nb_mbuf_per_pool;
1045 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
1046 struct rte_gro_param gro_param;
1050 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
1052 /* Configuration of logical cores. */
1053 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1054 sizeof(struct fwd_lcore *) * nb_lcores,
1055 RTE_CACHE_LINE_SIZE);
1056 if (fwd_lcores == NULL) {
1057 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1058 "failed\n", nb_lcores);
1060 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1061 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1062 sizeof(struct fwd_lcore),
1063 RTE_CACHE_LINE_SIZE);
1064 if (fwd_lcores[lc_id] == NULL) {
1065 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1068 fwd_lcores[lc_id]->cpuid_idx = lc_id;
1071 RTE_ETH_FOREACH_DEV(pid) {
1073 /* Apply default TxRx configuration for all ports */
1074 port->dev_conf.txmode = tx_mode;
1075 port->dev_conf.rxmode = rx_mode;
1076 rte_eth_dev_info_get(pid, &port->dev_info);
1078 if (!(port->dev_info.tx_offload_capa &
1079 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1080 port->dev_conf.txmode.offloads &=
1081 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1082 if (!(port->dev_info.tx_offload_capa &
1083 DEV_TX_OFFLOAD_MATCH_METADATA))
1084 port->dev_conf.txmode.offloads &=
1085 ~DEV_TX_OFFLOAD_MATCH_METADATA;
1087 if (port_numa[pid] != NUMA_NO_CONFIG)
1088 port_per_socket[port_numa[pid]]++;
1090 uint32_t socket_id = rte_eth_dev_socket_id(pid);
1093 * if socket_id is invalid,
1094 * set to the first available socket.
1096 if (check_socket_id(socket_id) < 0)
1097 socket_id = socket_ids[0];
1098 port_per_socket[socket_id]++;
1102 /* Apply Rx offloads configuration */
1103 for (k = 0; k < port->dev_info.max_rx_queues; k++)
1104 port->rx_conf[k].offloads =
1105 port->dev_conf.rxmode.offloads;
1106 /* Apply Tx offloads configuration */
1107 for (k = 0; k < port->dev_info.max_tx_queues; k++)
1108 port->tx_conf[k].offloads =
1109 port->dev_conf.txmode.offloads;
1111 /* set flag to initialize port/queue */
1112 port->need_reconfig = 1;
1113 port->need_reconfig_queues = 1;
1114 port->tx_metadata = 0;
1118 * Create pools of mbuf.
1119 * If NUMA support is disabled, create a single pool of mbuf in
1120 * socket 0 memory by default.
1121 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
1123 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1124 * nb_txd can be configured at run time.
1126 if (param_total_num_mbufs)
1127 nb_mbuf_per_pool = param_total_num_mbufs;
1129 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1130 (nb_lcores * mb_mempool_cache) +
1131 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1132 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1138 for (i = 0; i < num_sockets; i++)
1139 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1142 if (socket_num == UMA_NO_CONFIG)
1143 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
1145 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
1151 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1152 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1154 * Records which Mbuf pool to use by each logical core, if needed.
1156 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1157 mbp = mbuf_pool_find(
1158 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
1161 mbp = mbuf_pool_find(0);
1162 fwd_lcores[lc_id]->mbp = mbp;
1163 /* initialize GSO context */
1164 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1165 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1166 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1167 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
1169 fwd_lcores[lc_id]->gso_ctx.flag = 0;
1172 /* Configuration of packet forwarding streams. */
1173 if (init_fwd_streams() < 0)
1174 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
1178 /* create a gro context for each lcore */
1179 gro_param.gro_types = RTE_GRO_TCP_IPV4;
1180 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1181 gro_param.max_item_per_flow = MAX_PKT_BURST;
1182 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1183 gro_param.socket_id = rte_lcore_to_socket_id(
1184 fwd_lcores_cpuids[lc_id]);
1185 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1186 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1187 rte_exit(EXIT_FAILURE,
1188 "rte_gro_ctx_create() failed\n");
1192 #if defined RTE_LIBRTE_PMD_SOFTNIC
1193 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
1194 RTE_ETH_FOREACH_DEV(pid) {
1196 const char *driver = port->dev_info.driver_name;
1198 if (strcmp(driver, "net_softnic") == 0)
1199 port->softport.fwd_lcore_arg = fwd_lcores;
1208 reconfig(portid_t new_port_id, unsigned socket_id)
1210 struct rte_port *port;
1212 /* Reconfiguration of Ethernet ports. */
1213 port = &ports[new_port_id];
1214 rte_eth_dev_info_get(new_port_id, &port->dev_info);
1216 /* set flag to initialize port/queue */
1217 port->need_reconfig = 1;
1218 port->need_reconfig_queues = 1;
1219 port->socket_id = socket_id;
1226 init_fwd_streams(void)
1229 struct rte_port *port;
1230 streamid_t sm_id, nb_fwd_streams_new;
1233 /* set socket id according to numa or not */
1234 RTE_ETH_FOREACH_DEV(pid) {
1236 if (nb_rxq > port->dev_info.max_rx_queues) {
1237 printf("Fail: nb_rxq(%d) is greater than "
1238 "max_rx_queues(%d)\n", nb_rxq,
1239 port->dev_info.max_rx_queues);
1242 if (nb_txq > port->dev_info.max_tx_queues) {
1243 printf("Fail: nb_txq(%d) is greater than "
1244 "max_tx_queues(%d)\n", nb_txq,
1245 port->dev_info.max_tx_queues);
1249 if (port_numa[pid] != NUMA_NO_CONFIG)
1250 port->socket_id = port_numa[pid];
1252 port->socket_id = rte_eth_dev_socket_id(pid);
1255 * if socket_id is invalid,
1256 * set to the first available socket.
1258 if (check_socket_id(port->socket_id) < 0)
1259 port->socket_id = socket_ids[0];
1263 if (socket_num == UMA_NO_CONFIG)
1264 port->socket_id = 0;
1266 port->socket_id = socket_num;
1270 q = RTE_MAX(nb_rxq, nb_txq);
1272 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
1275 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
1276 if (nb_fwd_streams_new == nb_fwd_streams)
1279 if (fwd_streams != NULL) {
1280 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1281 if (fwd_streams[sm_id] == NULL)
1283 rte_free(fwd_streams[sm_id]);
1284 fwd_streams[sm_id] = NULL;
1286 rte_free(fwd_streams);
1291 nb_fwd_streams = nb_fwd_streams_new;
1292 if (nb_fwd_streams) {
1293 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1294 sizeof(struct fwd_stream *) * nb_fwd_streams,
1295 RTE_CACHE_LINE_SIZE);
1296 if (fwd_streams == NULL)
1297 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1298 " (struct fwd_stream *)) failed\n",
1301 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1302 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1303 " struct fwd_stream", sizeof(struct fwd_stream),
1304 RTE_CACHE_LINE_SIZE);
1305 if (fwd_streams[sm_id] == NULL)
1306 rte_exit(EXIT_FAILURE, "rte_zmalloc"
1307 "(struct fwd_stream) failed\n");
1314 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1316 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1318 unsigned int total_burst;
1319 unsigned int nb_burst;
1320 unsigned int burst_stats[3];
1321 uint16_t pktnb_stats[3];
1323 int burst_percent[3];
1326 * First compute the total number of packet bursts and the
1327 * two highest numbers of bursts of the same number of packets.
1330 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
1331 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
1332 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1333 nb_burst = pbs->pkt_burst_spread[nb_pkt];
1336 total_burst += nb_burst;
1337 if (nb_burst > burst_stats[0]) {
1338 burst_stats[1] = burst_stats[0];
1339 pktnb_stats[1] = pktnb_stats[0];
1340 burst_stats[0] = nb_burst;
1341 pktnb_stats[0] = nb_pkt;
1342 } else if (nb_burst > burst_stats[1]) {
1343 burst_stats[1] = nb_burst;
1344 pktnb_stats[1] = nb_pkt;
1347 if (total_burst == 0)
1349 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
1350 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
1351 burst_percent[0], (int) pktnb_stats[0]);
1352 if (burst_stats[0] == total_burst) {
1356 if (burst_stats[0] + burst_stats[1] == total_burst) {
1357 printf(" + %d%% of %d pkts]\n",
1358 100 - burst_percent[0], pktnb_stats[1]);
1361 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
1362 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
1363 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
1364 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
1367 printf(" + %d%% of %d pkts + %d%% of others]\n",
1368 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
1370 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print per-port forward statistics: RX/TX packet totals, drops,
 * checksum error counters (csum engine only) and, when queue-stats
 * mapping is enabled, the per-stats-register queue counters.
 *
 * NOTE(review): this listing is an elided, line-numbered paste; closing
 * braces, the "else" branch marker and some declarations are missing
 * here — confirm against upstream app/test-pmd/testpmd.c before editing.
 */
1373 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
1375 struct rte_port *port;
1378 static const char *fwd_stats_border = "----------------------";
1380 port = &ports[port_id];
1381 printf("\n %s Forward statistics for port %-2d %s\n",
1382 fwd_stats_border, port_id, fwd_stats_border);
/* Aggregate (non queue-mapped) layout */
1384 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
1385 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1387 stats->ipackets, stats->imissed,
1388 (uint64_t) (stats->ipackets + stats->imissed));
1390 if (cur_fwd_eng == &csum_fwd_engine)
1391 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
1392 port->rx_bad_ip_csum, port->rx_bad_l4_csum,
1393 port->rx_bad_outer_l4_csum);
1394 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1395 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
1396 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
1399 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1401 stats->opackets, port->tx_dropped,
1402 (uint64_t) (stats->opackets + port->tx_dropped));
/* Queue-mapped layout (right-aligned, wider columns); the "else" line
 * itself is one of the elided lines. */
1405 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
1407 stats->ipackets, stats->imissed,
1408 (uint64_t) (stats->ipackets + stats->imissed));
1410 if (cur_fwd_eng == &csum_fwd_engine)
1411 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64" Bad-outer-l4csum: %-14"PRIu64"\n",
1412 port->rx_bad_ip_csum, port->rx_bad_l4_csum,
1413 port->rx_bad_outer_l4_csum);
1414 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1415 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
1416 printf(" RX-nombufs: %14"PRIu64"\n",
1420 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
1422 stats->opackets, port->tx_dropped,
1423 (uint64_t) (stats->opackets + port->tx_dropped));
/* Optional per-burst histograms for the streams attached to this port */
1426 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1427 if (port->rx_stream)
1428 pkt_burst_stats_display("RX",
1429 &port->rx_stream->rx_burst_stats);
1430 if (port->tx_stream)
1431 pkt_burst_stats_display("TX",
1432 &port->tx_stream->tx_burst_stats);
/* Per stats-register counters, only meaningful when mapping is enabled */
1435 if (port->rx_queue_stats_mapping_enabled) {
1437 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1438 printf(" Stats reg %2d RX-packets:%14"PRIu64
1439 " RX-errors:%14"PRIu64
1440 " RX-bytes:%14"PRIu64"\n",
1441 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1445 if (port->tx_queue_stats_mapping_enabled) {
1446 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1447 printf(" Stats reg %2d TX-packets:%14"PRIu64
1448 " TX-bytes:%14"PRIu64"\n",
1449 i, stats->q_opackets[i], stats->q_obytes[i]);
1453 printf(" %s--------------------------------%s\n",
1454 fwd_stats_border, fwd_stats_border);
/*
 * Print statistics for one forwarding stream: RX/TX packet counts,
 * drops and (csum engine only) bad-checksum counters.  Streams with no
 * traffic are skipped (the early "return" is among the elided lines).
 *
 * NOTE(review): elided, line-numbered paste — braces/returns missing;
 * confirm against upstream app/test-pmd/testpmd.c before editing.
 */
1458 fwd_stream_stats_display(streamid_t stream_id)
1460 struct fwd_stream *fs;
1461 static const char *fwd_top_stats_border = "-------";
1463 fs = fwd_streams[stream_id];
/* nothing to report for an idle stream */
1464 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1465 (fs->fwd_dropped == 0))
1467 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1468 "TX Port=%2d/Queue=%2d %s\n",
1469 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1470 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1471 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1472 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1474 /* if checksum mode */
1475 if (cur_fwd_eng == &csum_fwd_engine) {
1476 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
1477 "%-14u Rx- bad outer L4 checksum: %-14u\n",
1478 fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1479 fs->rx_bad_outer_l4_csum);
1482 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1483 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1484 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain every RX queue of every forwarding port before a test starts,
 * freeing all received mbufs.  Two passes are made, with a 10 ms pause
 * between them; each queue drain is bounded by a 1 s TSC timer so a
 * port that keeps delivering packets cannot hang testpmd.
 *
 * NOTE(review): elided, line-numbered paste — the "do {" opener and
 * loop-closing braces are among the missing lines; confirm against
 * upstream app/test-pmd/testpmd.c before editing.
 */
1489 flush_fwd_rx_queues(void)
1491 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1498 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1499 uint64_t timer_period;
1501 /* convert to number of cycles */
1502 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1504 for (j = 0; j < 2; j++) {
1505 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1506 for (rxq = 0; rxq < nb_rxq; rxq++) {
1507 port_id = fwd_ports_ids[rxp];
1509 * testpmd can stuck in the below do while loop
1510 * if rte_eth_rx_burst() always returns nonzero
1511 * packets. So timer is added to exit this loop
1512 * after 1sec timer expiry.
1514 prev_tsc = rte_rdtsc();
1516 nb_rx = rte_eth_rx_burst(port_id, rxq,
1517 pkts_burst, MAX_PKT_BURST);
1518 for (i = 0; i < nb_rx; i++)
1519 rte_pktmbuf_free(pkts_burst[i]);
1521 cur_tsc = rte_rdtsc();
1522 diff_tsc = cur_tsc - prev_tsc;
1523 timer_tsc += diff_tsc;
1524 } while ((nb_rx > 0) &&
1525 (timer_tsc < timer_period));
1529 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main per-lcore forwarding loop: repeatedly run pkt_fwd() over every
 * stream assigned to this lcore until fc->stopped is set.  Optionally
 * triggers the periodic bitrate calculation (once per second, on the
 * designated lcore) and the latency-stats update.
 *
 * NOTE(review): elided, line-numbered paste — the "do {" opener,
 * several declarations and #endif lines are missing; confirm against
 * upstream app/test-pmd/testpmd.c before editing.
 */
1534 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1536 struct fwd_stream **fsm;
1539 #ifdef RTE_LIBRTE_BITRATE
1540 uint64_t tics_per_1sec;
1541 uint64_t tics_datum;
1542 uint64_t tics_current;
1543 uint16_t i, cnt_ports;
1545 cnt_ports = nb_ports;
1546 tics_datum = rte_rdtsc();
1547 tics_per_1sec = rte_get_timer_hz();
1549 fsm = &fwd_streams[fc->stream_idx];
1550 nb_fs = fc->stream_nb;
/* forward one pass over all streams owned by this lcore */
1552 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1553 (*pkt_fwd)(fsm[sm_id]);
1554 #ifdef RTE_LIBRTE_BITRATE
1555 if (bitrate_enabled != 0 &&
1556 bitrate_lcore_id == rte_lcore_id()) {
1557 tics_current = rte_rdtsc();
1558 if (tics_current - tics_datum >= tics_per_1sec) {
1559 /* Periodic bitrate calculation */
1560 for (i = 0; i < cnt_ports; i++)
1561 rte_stats_bitrate_calc(bitrate_data,
1563 tics_datum = tics_current;
1567 #ifdef RTE_LIBRTE_LATENCY_STATS
1568 if (latencystats_enabled != 0 &&
1569 latencystats_lcore_id == rte_lcore_id())
1570 rte_latencystats_update();
1573 } while (! fc->stopped);
1577 start_pkt_forward_on_core(void *fwd_arg)
1579 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1580 cur_fwd_config.fwd_eng->packet_fwd);
1585 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1586 * Used to start communication flows in network loopback test configurations.
1589 run_one_txonly_burst_on_core(void *fwd_arg)
1591 struct fwd_lcore *fwd_lc;
1592 struct fwd_lcore tmp_lcore;
1594 fwd_lc = (struct fwd_lcore *) fwd_arg;
1595 tmp_lcore = *fwd_lc;
1596 tmp_lcore.stopped = 1;
1597 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1602 * Launch packet forwarding:
1603 * - Setup per-port forwarding context.
1604 * - launch logical cores with their forwarding configuration.
/*
 * NOTE(review): elided, line-numbered paste — declarations of i/lc_id/
 * diag and closing braces are missing; confirm against upstream
 * app/test-pmd/testpmd.c before editing.
 */
1607 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1609 port_fwd_begin_t port_fwd_begin;
/* run the engine's optional per-port begin hook first */
1614 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1615 if (port_fwd_begin != NULL) {
1616 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1617 (*port_fwd_begin)(fwd_ports_ids[i]);
/* launch one forwarding loop per configured lcore; in interactive mode
 * the master lcore is skipped so it can keep serving the command line */
1619 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1620 lc_id = fwd_lcores_cpuids[i];
1621 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1622 fwd_lcores[i]->stopped = 0;
1623 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1624 fwd_lcores[i], lc_id);
1626 printf("launch lcore %u failed - diag=%d\n",
1633 * Launch packet forwarding configuration.
/*
 * Validate the current configuration (queue counts per engine, ports
 * started, DCB constraints), snapshot baseline port stats, zero all
 * stream counters, optionally run tx_first bursts, then launch the
 * forwarding lcores.
 *
 * @param with_tx_first  number of TXONLY warm-up bursts to send before
 *                       starting the real engine (0 = none)
 *
 * NOTE(review): elided, line-numbered paste — early "return"s after the
 * error printfs, loop braces and several declarations are missing;
 * confirm against upstream app/test-pmd/testpmd.c before editing.
 */
1636 start_packet_forwarding(int with_tx_first)
1638 port_fwd_begin_t port_fwd_begin;
1639 port_fwd_end_t port_fwd_end;
1640 struct rte_port *port;
/* engine/queue-count sanity checks: each fwd mode needs its queues */
1645 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1646 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1648 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1649 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1651 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1652 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1653 (!nb_rxq || !nb_txq))
1654 rte_exit(EXIT_FAILURE,
1655 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1656 cur_fwd_eng->fwd_mode_name);
1658 if (all_ports_started() == 0) {
1659 printf("Not all ports were started\n");
1662 if (test_done == 0) {
1663 printf("Packet forwarding already started\n");
/* DCB mode: every fwd port must be DCB-configured and >1 core is needed */
1669 for (i = 0; i < nb_fwd_ports; i++) {
1670 pt_id = fwd_ports_ids[i];
1671 port = &ports[pt_id];
1672 if (!port->dcb_flag) {
1673 printf("In DCB mode, all forwarding ports must "
1674 "be configured in this mode.\n");
1678 if (nb_fwd_lcores == 1) {
1679 printf("In DCB mode,the nb forwarding cores "
1680 "should be larger than 1.\n");
1689 flush_fwd_rx_queues();
1691 pkt_fwd_config_display(&cur_fwd_config);
1692 rxtx_config_display();
/* snapshot current HW stats as the baseline for this run */
1694 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1695 pt_id = fwd_ports_ids[i];
1696 port = &ports[pt_id];
1697 rte_eth_stats_get(pt_id, &port->stats);
1698 port->tx_dropped = 0;
1700 map_port_queue_stats_mapping_registers(pt_id, port);
/* zero all per-stream counters */
1702 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1703 fwd_streams[sm_id]->rx_packets = 0;
1704 fwd_streams[sm_id]->tx_packets = 0;
1705 fwd_streams[sm_id]->fwd_dropped = 0;
1706 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1707 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1708 fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;
1710 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1711 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1712 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1713 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1714 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1716 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1717 fwd_streams[sm_id]->core_cycles = 0;
/* optional tx_first warm-up: send bursts and wait for completion */
1720 if (with_tx_first) {
1721 port_fwd_begin = tx_only_engine.port_fwd_begin;
1722 if (port_fwd_begin != NULL) {
1723 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1724 (*port_fwd_begin)(fwd_ports_ids[i]);
1726 while (with_tx_first--) {
1727 launch_packet_forwarding(
1728 run_one_txonly_burst_on_core);
1729 rte_eal_mp_wait_lcore();
1731 port_fwd_end = tx_only_engine.port_fwd_end;
1732 if (port_fwd_end != NULL) {
1733 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1734 (*port_fwd_end)(fwd_ports_ids[i]);
1737 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop all forwarding lcores, run the engine's per-port end hook,
 * accumulate per-stream counters into their ports, then print per-port
 * and accumulated statistics (stats are shown relative to the baseline
 * snapshot taken in start_packet_forwarding()).
 *
 * NOTE(review): elided, line-numbered paste — the test_done guard,
 * several declarations, else-branch lines and closing braces are
 * missing; confirm against upstream app/test-pmd/testpmd.c before
 * editing.
 */
1741 stop_packet_forwarding(void)
1743 struct rte_eth_stats stats;
1744 struct rte_port *port;
1745 port_fwd_end_t port_fwd_end;
1750 uint64_t total_recv;
1751 uint64_t total_xmit;
1752 uint64_t total_rx_dropped;
1753 uint64_t total_tx_dropped;
1754 uint64_t total_rx_nombuf;
1755 uint64_t tx_dropped;
1756 uint64_t rx_bad_ip_csum;
1757 uint64_t rx_bad_l4_csum;
1758 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1759 uint64_t fwd_cycles;
1762 static const char *acc_stats_border = "+++++++++++++++";
1765 printf("Packet forwarding not started\n");
1768 printf("Telling cores to stop...");
1769 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1770 fwd_lcores[lc_id]->stopped = 1;
1771 printf("\nWaiting for lcores to finish...\n");
1772 rte_eal_mp_wait_lcore();
1773 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1774 if (port_fwd_end != NULL) {
1775 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1776 pt_id = fwd_ports_ids[i];
1777 (*port_fwd_end)(pt_id);
1780 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* fold each stream's counters into its RX/TX ports */
1783 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1784 if (cur_fwd_config.nb_fwd_streams >
1785 cur_fwd_config.nb_fwd_ports) {
1786 fwd_stream_stats_display(sm_id);
1787 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1788 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1790 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1792 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1795 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1796 tx_dropped = (uint64_t) (tx_dropped +
1797 fwd_streams[sm_id]->fwd_dropped);
1798 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1801 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1802 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1803 fwd_streams[sm_id]->rx_bad_ip_csum);
1804 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1808 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1809 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1810 fwd_streams[sm_id]->rx_bad_l4_csum);
1811 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1814 ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
1815 fwd_streams[sm_id]->rx_bad_outer_l4_csum;
1817 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1818 fwd_cycles = (uint64_t) (fwd_cycles +
1819 fwd_streams[sm_id]->core_cycles);
1824 total_rx_dropped = 0;
1825 total_tx_dropped = 0;
1826 total_rx_nombuf = 0;
/* per-port: subtract the baseline snapshot, accumulate and display */
1827 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1828 pt_id = fwd_ports_ids[i];
1830 port = &ports[pt_id];
1831 rte_eth_stats_get(pt_id, &stats);
1832 stats.ipackets -= port->stats.ipackets;
1833 port->stats.ipackets = 0;
1834 stats.opackets -= port->stats.opackets;
1835 port->stats.opackets = 0;
1836 stats.ibytes -= port->stats.ibytes;
1837 port->stats.ibytes = 0;
1838 stats.obytes -= port->stats.obytes;
1839 port->stats.obytes = 0;
1840 stats.imissed -= port->stats.imissed;
1841 port->stats.imissed = 0;
1842 stats.oerrors -= port->stats.oerrors;
1843 port->stats.oerrors = 0;
1844 stats.rx_nombuf -= port->stats.rx_nombuf;
1845 port->stats.rx_nombuf = 0;
1847 total_recv += stats.ipackets;
1848 total_xmit += stats.opackets;
1849 total_rx_dropped += stats.imissed;
1850 total_tx_dropped += port->tx_dropped;
1851 total_rx_nombuf += stats.rx_nombuf;
1853 fwd_port_stats_display(pt_id, &stats);
1856 printf("\n %s Accumulated forward statistics for all ports"
1858 acc_stats_border, acc_stats_border);
1859 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1861 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1863 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1864 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1865 if (total_rx_nombuf > 0)
1866 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1867 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1869 acc_stats_border, acc_stats_border);
1870 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1872 printf("\n CPU cycles/packet=%u (total cycles="
1873 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1874 (unsigned int)(fwd_cycles / total_recv),
1875 fwd_cycles, total_recv);
1877 printf("\nDone.\n");
1882 dev_set_link_up(portid_t pid)
1884 if (rte_eth_dev_set_link_up(pid) < 0)
1885 printf("\nSet link up fail.\n");
1889 dev_set_link_down(portid_t pid)
1891 if (rte_eth_dev_set_link_down(pid) < 0)
1892 printf("\nSet link down fail.\n");
1896 all_ports_started(void)
1899 struct rte_port *port;
1901 RTE_ETH_FOREACH_DEV(pi) {
1903 /* Check if there is a port which is not started */
1904 if ((port->port_status != RTE_PORT_STARTED) &&
1905 (port->slave_flag == 0))
1909 /* No port is not started */
1914 port_is_stopped(portid_t port_id)
1916 struct rte_port *port = &ports[port_id];
1918 if ((port->port_status != RTE_PORT_STOPPED) &&
1919 (port->slave_flag == 0))
1925 all_ports_stopped(void)
1929 RTE_ETH_FOREACH_DEV(pi) {
1930 if (!port_is_stopped(pi))
1938 port_is_started(portid_t port_id)
1940 if (port_id_is_invalid(port_id, ENABLED_WARN))
1943 if (ports[port_id].port_status != RTE_PORT_STARTED)
/*
 * Start port "pid" (or every port when pid == RTE_PORT_ALL):
 * (re)configure the device if flagged, (re)create its RX/TX queues
 * honouring NUMA placement, start it, print its MAC address and
 * finally check link status.  Port state transitions are done with
 * atomic compare-and-set on port_status so concurrent commands cannot
 * race.
 *
 * NOTE(review): elided, line-numbered paste — "continue"/"return -1"
 * lines after many error printfs, the per-port "port = &ports[pi];"
 * assignment, and most closing braces are missing; confirm against
 * upstream app/test-pmd/testpmd.c before editing.
 */
1950 start_port(portid_t pid)
1952 int diag, need_check_link_status = -1;
1955 struct rte_port *port;
1956 struct ether_addr mac_addr;
1958 if (port_id_is_invalid(pid, ENABLED_WARN))
1963 RTE_ETH_FOREACH_DEV(pi) {
1964 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1967 need_check_link_status = 0;
/* claim the port: STOPPED -> HANDLING, skip if not stopped */
1969 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1970 RTE_PORT_HANDLING) == 0) {
1971 printf("Port %d is now not stopped\n", pi);
1975 if (port->need_reconfig > 0) {
1976 port->need_reconfig = 0;
1978 if (flow_isolate_all) {
1979 int ret = port_flow_isolate(pi, 1);
1981 printf("Failed to apply isolated"
1982 " mode on port %d\n", pi);
1986 configure_rxtx_dump_callbacks(0);
1987 printf("Configuring Port %d (socket %u)\n", pi,
1989 /* configure port */
1990 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1993 if (rte_atomic16_cmpset(&(port->port_status),
1994 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1995 printf("Port %d can not be set back "
1996 "to stopped\n", pi);
1997 printf("Fail to configure port %d\n", pi);
1998 /* try to reconfigure port next time */
1999 port->need_reconfig = 1;
2003 if (port->need_reconfig_queues > 0) {
2004 port->need_reconfig_queues = 0;
2005 /* setup tx queues */
2006 for (qi = 0; qi < nb_txq; qi++) {
/* NUMA: place the TX ring on the socket requested per port if set */
2007 if ((numa_support) &&
2008 (txring_numa[pi] != NUMA_NO_CONFIG))
2009 diag = rte_eth_tx_queue_setup(pi, qi,
2010 port->nb_tx_desc[qi],
2012 &(port->tx_conf[qi]));
2014 diag = rte_eth_tx_queue_setup(pi, qi,
2015 port->nb_tx_desc[qi],
2017 &(port->tx_conf[qi]));
2022 /* Fail to setup tx queue, return */
2023 if (rte_atomic16_cmpset(&(port->port_status),
2025 RTE_PORT_STOPPED) == 0)
2026 printf("Port %d can not be set back "
2027 "to stopped\n", pi);
2028 printf("Fail to configure port %d tx queues\n",
2030 /* try to reconfigure queues next time */
2031 port->need_reconfig_queues = 1;
2034 for (qi = 0; qi < nb_rxq; qi++) {
2035 /* setup rx queues */
2036 if ((numa_support) &&
2037 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
2038 struct rte_mempool * mp =
2039 mbuf_pool_find(rxring_numa[pi]);
2041 printf("Failed to setup RX queue:"
2042 "No mempool allocation"
2043 " on the socket %d\n",
2048 diag = rte_eth_rx_queue_setup(pi, qi,
2049 port->nb_rx_desc[qi],
2051 &(port->rx_conf[qi]),
2054 struct rte_mempool *mp =
2055 mbuf_pool_find(port->socket_id);
2057 printf("Failed to setup RX queue:"
2058 "No mempool allocation"
2059 " on the socket %d\n",
2063 diag = rte_eth_rx_queue_setup(pi, qi,
2064 port->nb_rx_desc[qi],
2066 &(port->rx_conf[qi]),
2072 /* Fail to setup rx queue, return */
2073 if (rte_atomic16_cmpset(&(port->port_status),
2075 RTE_PORT_STOPPED) == 0)
2076 printf("Port %d can not be set back "
2077 "to stopped\n", pi);
2078 printf("Fail to configure port %d rx queues\n",
2080 /* try to reconfigure queues next time */
2081 port->need_reconfig_queues = 1;
2085 configure_rxtx_dump_callbacks(verbose_level);
/* start the device itself */
2087 if (rte_eth_dev_start(pi) < 0) {
2088 printf("Fail to start port %d\n", pi);
2090 /* Fail to setup rx queue, return */
2091 if (rte_atomic16_cmpset(&(port->port_status),
2092 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2093 printf("Port %d can not be set back to "
2098 if (rte_atomic16_cmpset(&(port->port_status),
2099 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2100 printf("Port %d can not be set into started\n", pi);
2102 rte_eth_macaddr_get(pi, &mac_addr);
2103 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2104 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2105 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2106 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2108 /* at least one port started, need checking link status */
2109 need_check_link_status = 1;
2112 if (need_check_link_status == 1 && !no_link_check)
2113 check_all_ports_link_status(RTE_PORT_ALL);
2114 else if (need_check_link_status == 0)
2115 printf("Please stop the ports first\n");
/*
 * Stop port "pid" (or all ports when pid == RTE_PORT_ALL).  Refuses to
 * stop ports that are part of the forwarding configuration while a
 * test is running, or bonding slaves.  Uses atomic compare-and-set on
 * port_status for the STARTED -> HANDLING -> STOPPED transition.
 *
 * NOTE(review): elided, line-numbered paste — "continue" lines after
 * the guard printfs and the "port = &ports[pi];" assignment are
 * missing; confirm against upstream app/test-pmd/testpmd.c.
 */
2122 stop_port(portid_t pid)
2125 struct rte_port *port;
2126 int need_check_link_status = 0;
2133 if (port_id_is_invalid(pid, ENABLED_WARN))
2136 printf("Stopping ports...\n");
2138 RTE_ETH_FOREACH_DEV(pi) {
2139 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2142 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2143 printf("Please remove port %d from forwarding configuration.\n", pi);
2147 if (port_is_bonding_slave(pi)) {
2148 printf("Please remove port %d from bonded device.\n", pi);
2153 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2154 RTE_PORT_HANDLING) == 0)
2157 rte_eth_dev_stop(pi);
2159 if (rte_atomic16_cmpset(&(port->port_status),
2160 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2161 printf("Port %d can not be set into stopped\n", pi);
2162 need_check_link_status = 1;
2164 if (need_check_link_status && !no_link_check)
2165 check_all_ports_link_status(RTE_PORT_ALL);
2171 remove_invalid_ports_in(portid_t *array, portid_t *total)
2174 portid_t new_total = 0;
2176 for (i = 0; i < *total; i++)
2177 if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2178 array[new_total] = array[i];
2185 remove_invalid_ports(void)
2187 remove_invalid_ports_in(ports_ids, &nb_ports);
2188 remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2189 nb_cfg_ports = nb_fwd_ports;
/*
 * Close port "pid" (or all ports when pid == RTE_PORT_ALL): flush its
 * flow rules, call rte_eth_dev_close() and prune invalid ports from
 * the global lists.  Forwarding ports (while a test runs), bonding
 * slaves, already-closed and non-stopped ports are skipped.
 *
 * NOTE(review): elided, line-numbered paste — "continue" lines after
 * guard printfs and the "port = &ports[pi];" assignment are missing;
 * confirm against upstream app/test-pmd/testpmd.c.
 */
2193 close_port(portid_t pid)
2196 struct rte_port *port;
2198 if (port_id_is_invalid(pid, ENABLED_WARN))
2201 printf("Closing ports...\n");
2203 RTE_ETH_FOREACH_DEV(pi) {
2204 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2207 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2208 printf("Please remove port %d from forwarding configuration.\n", pi);
2212 if (port_is_bonding_slave(pi)) {
2213 printf("Please remove port %d from bonded device.\n", pi);
/* CLOSED -> CLOSED cmpset is a pure state probe: returns 1 iff already closed */
2218 if (rte_atomic16_cmpset(&(port->port_status),
2219 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2220 printf("Port %d is already closed\n", pi);
2224 if (rte_atomic16_cmpset(&(port->port_status),
2225 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2226 printf("Port %d is now not stopped\n", pi);
2230 if (port->flow_list)
2231 port_flow_flush(pi);
2232 rte_eth_dev_close(pi);
2234 remove_invalid_ports();
2236 if (rte_atomic16_cmpset(&(port->port_status),
2237 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2238 printf("Port %d cannot be set to closed\n", pi);
/*
 * Reset port "pid" (or all ports when pid == RTE_PORT_ALL) via
 * rte_eth_dev_reset(); on success the port is flagged for full
 * reconfiguration (device and queues) at the next start.
 *
 * NOTE(review): elided, line-numbered paste — "continue" lines, the
 * success-branch "if (diag == 0)" line and closing braces are missing;
 * confirm against upstream app/test-pmd/testpmd.c.
 */
2245 reset_port(portid_t pid)
2249 struct rte_port *port;
2251 if (port_id_is_invalid(pid, ENABLED_WARN))
2254 printf("Resetting ports...\n");
2256 RTE_ETH_FOREACH_DEV(pi) {
2257 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2260 if (port_is_forwarding(pi) != 0 && test_done == 0) {
2261 printf("Please remove port %d from forwarding "
2262 "configuration.\n", pi);
2266 if (port_is_bonding_slave(pi)) {
2267 printf("Please remove port %d from bonded device.\n",
2272 diag = rte_eth_dev_reset(pi);
2275 port->need_reconfig = 1;
2276 port->need_reconfig_queues = 1;
2278 printf("Failed to reset port %d. diag=%d\n", pi, diag);
2286 attach_port(char *identifier)
2289 struct rte_dev_iterator iterator;
2291 printf("Attaching a new port...\n");
2293 if (identifier == NULL) {
2294 printf("Invalid parameters are specified\n");
2298 if (rte_dev_probe(identifier) != 0) {
2299 TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2303 RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2304 if (port_is_forwarding(pi))
2305 continue; /* port was already attached before */
2306 setup_attached_port(pi);
2311 setup_attached_port(portid_t pi)
2313 unsigned int socket_id;
2315 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2316 /* if socket_id is invalid, set to the first available socket. */
2317 if (check_socket_id(socket_id) < 0)
2318 socket_id = socket_ids[0];
2319 reconfig(pi, socket_id);
2320 rte_eth_promiscuous_enable(pi);
2322 ports_ids[nb_ports++] = pi;
2323 fwd_ports_ids[nb_fwd_ports++] = pi;
2324 nb_cfg_ports = nb_fwd_ports;
2325 ports[pi].port_status = RTE_PORT_STOPPED;
2327 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Detach the rte_device backing "port_id": flush flow rules if the
 * port was not closed, call rte_dev_remove(), then mark every sibling
 * port of the same device closed and prune the global port lists.
 *
 * NOTE(review): elided, line-numbered paste — "return" lines after the
 * guard printfs, the NULL-device check line, the sibling "continue"
 * and closing braces are missing; confirm against upstream
 * app/test-pmd/testpmd.c.
 */
2332 detach_port_device(portid_t port_id)
2334 struct rte_device *dev;
2337 printf("Removing a device...\n");
2339 dev = rte_eth_devices[port_id].device;
2341 printf("Device already removed\n");
2345 if (ports[port_id].port_status != RTE_PORT_CLOSED) {
2346 if (ports[port_id].port_status != RTE_PORT_STOPPED) {
2347 printf("Port not stopped\n");
2350 printf("Port was not closed\n");
2351 if (ports[port_id].flow_list)
2352 port_flow_flush(port_id);
2355 if (rte_dev_remove(dev) != 0) {
2356 TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
/* one device may back several ports: close them all */
2360 for (sibling = 0; sibling < RTE_MAX_ETHPORTS; sibling++) {
2361 if (rte_eth_devices[sibling].device != dev)
2363 /* reset mapping between old ports and removed device */
2364 rte_eth_devices[sibling].device = NULL;
2365 if (ports[sibling].port_status != RTE_PORT_CLOSED) {
2366 /* sibling ports are forced to be closed */
2367 ports[sibling].port_status = RTE_PORT_CLOSED;
2368 printf("Port %u is closed\n", sibling);
2372 remove_invalid_ports();
2374 printf("Device of port %u is detached\n", port_id);
2375 printf("Now total ports is %d\n", nb_ports);
/*
 * Body of pmd_test_exit() (the function header line is among the
 * elided lines of this paste): stop forwarding, shut down every port,
 * detach virtio-user ports as a cleanup workaround, and tear down the
 * hotplug/device-event machinery.
 *
 * NOTE(review): elided, line-numbered paste — the function signature,
 * stop/close calls inside the loop and guard conditions are missing;
 * confirm against upstream app/test-pmd/testpmd.c.
 */
2383 struct rte_device *device;
2388 stop_packet_forwarding();
2390 if (ports != NULL) {
2392 RTE_ETH_FOREACH_DEV(pt_id) {
2393 printf("\nShutting down port %d...\n", pt_id);
2399 * This is a workaround to fix a virtio-user issue that
2400 * requires to call clean-up routine to remove existing
2402 * This workaround valid only for testpmd, needs a fix
2403 * valid for all applications.
2404 * TODO: Implement proper resource cleanup
2406 device = rte_eth_devices[pt_id].device;
2407 if (device && !strcmp(device->driver->name, "net_virtio_user"))
2408 detach_port_device(pt_id);
/* tear down hotplug monitoring that was enabled at startup */
2413 ret = rte_dev_event_monitor_stop();
2416 "fail to stop device event monitor.");
2420 ret = rte_dev_event_callback_unregister(NULL,
2421 eth_dev_event_callback, NULL);
2424 "fail to unregister device event callback.\n");
2428 ret = rte_dev_hotplug_handle_disable();
2431 "fail to disable hotplug handling.\n");
2436 printf("\nBye...\n");
/* Table entry type for built-in test commands: a name mapped to a
 * no-argument handler.  PMD_TEST_CMD_NB counts the entries of the
 * pmd_test_menu table (the table itself and the struct's closing
 * brace are among the elided lines of this paste). */
2439 typedef void (*cmd_func_t)(void);
2440 struct pmd_test_command {
2441 const char *cmd_name;
2442 cmd_func_t cmd_func;
2445 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2447 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * Poll rte_eth_link_get_nowait() for every port selected by port_mask
 * every 100 ms, up to 9 s.  Once all ports are up (or on the final
 * iteration) the per-port status is printed and the loop exits.
 *
 * NOTE(review): elided, line-numbered paste — "continue"/"break"
 * lines, the all_ports_up initialisation and closing braces are
 * missing; confirm against upstream app/test-pmd/testpmd.c.  Also
 * note the stray "\n" inside the "half-duplex" literal below — it
 * looks unintended (the full-duplex string has none); verify against
 * upstream before changing, since it is runtime output.
 */
2449 check_all_ports_link_status(uint32_t port_mask)
2451 #define CHECK_INTERVAL 100 /* 100ms */
2452 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2454 uint8_t count, all_ports_up, print_flag = 0;
2455 struct rte_eth_link link;
2457 printf("Checking link statuses...\n");
2459 for (count = 0; count <= MAX_CHECK_TIME; count++) {
2461 RTE_ETH_FOREACH_DEV(portid) {
2462 if ((port_mask & (1 << portid)) == 0)
2464 memset(&link, 0, sizeof(link));
2465 rte_eth_link_get_nowait(portid, &link);
2466 /* print link status if flag set */
2467 if (print_flag == 1) {
2468 if (link.link_status)
2470 "Port%d Link Up. speed %u Mbps- %s\n",
2471 portid, link.link_speed,
2472 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2473 ("full-duplex") : ("half-duplex\n"));
2475 printf("Port %d Link Down\n", portid);
2478 /* clear all_ports_up flag if any link down */
2479 if (link.link_status == ETH_LINK_DOWN) {
2484 /* after finally printing all link status, get out */
2485 if (print_flag == 1)
2488 if (all_ports_up == 0) {
2490 rte_delay_ms(CHECK_INTERVAL);
2493 /* set the print_flag if all ports up or timeout */
2494 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred handler for a device-removal (RMV) event, run from an EAL
 * alarm: stop forwarding if the removed port was forwarding, close and
 * detach the port (with link checking temporarily disabled), then
 * restart forwarding if it had been stopped here.
 *
 * NOTE(review): elided, line-numbered paste — the need_to_start
 * assignments and the no_link_check=1 line are missing; confirm
 * against upstream app/test-pmd/testpmd.c.
 */
2504 rmv_event_callback(void *arg)
2506 int need_to_start = 0;
2507 int org_no_link_check = no_link_check;
2508 portid_t port_id = (intptr_t)arg;
2510 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2512 if (!test_done && port_is_forwarding(port_id)) {
2514 stop_packet_forwarding();
2518 no_link_check = org_no_link_check;
2519 close_port(port_id);
2520 detach_port_device(port_id);
2522 start_packet_forwarding(0);
2525 /* This function is used by the interrupt thread */
/*
 * Generic ethdev event callback: optionally prints the event (when its
 * bit is set in event_print_mask) and, for INTR_RMV, schedules
 * rmv_event_callback() via a 100 ms EAL alarm so the removal work runs
 * outside the interrupt thread.
 *
 * NOTE(review): elided, line-numbered paste — the return statements,
 * the "switch (type)" opener and "break"/default lines are missing;
 * confirm against upstream app/test-pmd/testpmd.c.
 */
2527 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2530 RTE_SET_USED(param);
2531 RTE_SET_USED(ret_param);
2533 if (type >= RTE_ETH_EVENT_MAX) {
2534 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2535 port_id, __func__, type);
2537 } else if (event_print_mask & (UINT32_C(1) << type)) {
2538 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2539 eth_event_desc[type]);
2543 if (port_id_is_invalid(port_id, DISABLED_WARN))
2547 case RTE_ETH_EVENT_INTR_RMV:
/* defer the actual detach: it must not run in the interrupt thread */
2548 if (rte_eal_alarm_set(100000,
2549 rmv_event_callback, (void *)(intptr_t)port_id))
2550 fprintf(stderr, "Could not set up deferred device removal\n");
/*
 * Register eth_event_callback() for every ethdev event type on all
 * ports; logs and aborts on the first registration failure.
 *
 * NOTE(review): elided, line-numbered paste — the callback argument of
 * rte_eth_dev_callback_register(), the error "return -1" and the final
 * "return 0" are missing; confirm against upstream testpmd.c.
 */
2559 register_eth_event_callback(void)
2562 enum rte_eth_event_type event;
2564 for (event = RTE_ETH_EVENT_UNKNOWN;
2565 event < RTE_ETH_EVENT_MAX; event++) {
2566 ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
2571 TESTPMD_LOG(ERR, "Failed to register callback for "
2572 "%s event\n", eth_event_desc[event]);
2580 /* This function is used by the interrupt thread */
/*
 * EAL device-event callback (hotplug): on REMOVE, resolve the port id
 * from the device name and run rmv_event_callback() directly; on ADD,
 * only logs (attach after kernel driver binding is a TODO).
 *
 * NOTE(review): elided, line-numbered paste — the switch opener,
 * "break" lines and early return are missing; confirm against
 * upstream app/test-pmd/testpmd.c.
 */
2582 eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
2583 __rte_unused void *arg)
2588 if (type >= RTE_DEV_EVENT_MAX) {
2589 fprintf(stderr, "%s called upon invalid event %d\n",
2595 case RTE_DEV_EVENT_REMOVE:
2596 RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
2598 ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
2600 RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
2604 rmv_event_callback((void *)(intptr_t)port_id);
2606 case RTE_DEV_EVENT_ADD:
2607 RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
2609 /* TODO: After finish kernel driver binding,
2610 * begin to attach port.
/*
 * Apply the user-supplied TX queue -> stats-register mappings for
 * "port_id" and set port->tx_queue_stats_mapping_enabled when at least
 * one mapping was programmed.
 *
 * NOTE(review): elided, line-numbered paste — the error return on
 * diag != 0 and the mapping_found=1 line are missing; confirm against
 * upstream app/test-pmd/testpmd.c.
 */
2619 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2623 uint8_t mapping_found = 0;
2625 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2626 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2627 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2628 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2629 tx_queue_stats_mappings[i].queue_id,
2630 tx_queue_stats_mappings[i].stats_counter_id);
2637 port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX counterpart of set_tx_queue_stats_mapping_registers(): program
 * the user-supplied RX queue -> stats-register mappings and flag the
 * port when at least one was applied.
 *
 * NOTE(review): elided, line-numbered paste — the error return on
 * diag != 0 and the mapping_found=1 line are missing; confirm against
 * upstream app/test-pmd/testpmd.c.
 */
2642 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2646 uint8_t mapping_found = 0;
2648 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2649 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2650 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2651 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2652 rx_queue_stats_mappings[i].queue_id,
2653 rx_queue_stats_mappings[i].stats_counter_id);
2660 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Program both TX and RX queue-stats mapping registers for port "pi".
 * -ENOTSUP from the driver is tolerated (the feature is simply flagged
 * disabled); any other error aborts testpmd.
 *
 * NOTE(review): elided, line-numbered paste — the "if (diag != 0)"
 * guards and closing braces are missing; confirm against upstream
 * app/test-pmd/testpmd.c.
 */
2665 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2669 diag = set_tx_queue_stats_mapping_registers(pi, port);
2671 if (diag == -ENOTSUP) {
2672 port->tx_queue_stats_mapping_enabled = 0;
2673 printf("TX queue stats mapping not supported port id=%d\n", pi);
2676 rte_exit(EXIT_FAILURE,
2677 "set_tx_queue_stats_mapping_registers "
2678 "failed for port id=%d diag=%d\n",
2682 diag = set_rx_queue_stats_mapping_registers(pi, port);
2684 if (diag == -ENOTSUP) {
2685 port->rx_queue_stats_mapping_enabled = 0;
2686 printf("RX queue stats mapping not supported port id=%d\n", pi);
2689 rte_exit(EXIT_FAILURE,
2690 "set_rx_queue_stats_mapping_registers "
2691 "failed for port id=%d diag=%d\n",
2697 rxtx_port_config(struct rte_port *port)
2701 for (qid = 0; qid < nb_rxq; qid++) {
2702 port->rx_conf[qid] = port->dev_info.default_rxconf;
2704 /* Check if any Rx parameters have been passed */
2705 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2706 port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2708 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2709 port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2711 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2712 port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2714 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2715 port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2717 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2718 port->rx_conf[qid].rx_drop_en = rx_drop_en;
2720 port->nb_rx_desc[qid] = nb_rxd;
2723 for (qid = 0; qid < nb_txq; qid++) {
2724 port->tx_conf[qid] = port->dev_info.default_txconf;
2726 /* Check if any Tx parameters have been passed */
2727 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2728 port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2730 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2731 port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2733 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2734 port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2736 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2737 port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2739 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2740 port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2742 port->nb_tx_desc[qid] = nb_txd;
/*
 * Build the default configuration of every port: FDIR settings, RSS
 * key/hash-functions (masked by what the device supports), multi-queue
 * RX mode (RSS when any hash function is enabled, otherwise NONE
 * unless DCB is configured), per-queue RX/TX config, MAC address,
 * queue-stats mappings and LSC/RMV interrupt flags.
 *
 * NOTE(review): elided, line-numbered paste — the "port = &ports[pid];"
 * assignment, the "if (nb_rxq > 1)"/"else" lines around the RSS setup
 * and several #endif lines are missing; confirm against upstream
 * app/test-pmd/testpmd.c.
 */
2747 init_port_config(void)
2750 struct rte_port *port;
2752 RTE_ETH_FOREACH_DEV(pid) {
2754 port->dev_conf.fdir_conf = fdir_conf;
2755 rte_eth_dev_info_get(pid, &port->dev_info);
/* multi-queue path: enable RSS restricted to device capabilities;
 * the single-queue "else" branch below clears the RSS hash */
2757 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2758 port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2759 rss_hf & port->dev_info.flow_type_rss_offloads;
2761 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2762 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2765 if (port->dcb_flag == 0) {
2766 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2767 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2769 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2772 rxtx_port_config(port);
2774 rte_eth_macaddr_get(pid, &port->eth_addr);
2776 map_port_queue_stats_mapping_registers(pid, port);
2777 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2778 rte_pmd_ixgbe_bypass_init(pid);
/* enable link-state-change / removal interrupts only when both the
 * user asked for them and the device advertises the capability */
2781 if (lsc_interrupt &&
2782 (rte_eth_devices[pid].data->dev_flags &
2783 RTE_ETH_DEV_INTR_LSC))
2784 port->dev_conf.intr_conf.lsc = 1;
2785 if (rmv_interrupt &&
2786 (rte_eth_devices[pid].data->dev_flags &
2787 RTE_ETH_DEV_INTR_RMV))
2788 port->dev_conf.intr_conf.rmv = 1;
2792 void set_port_slave_flag(portid_t slave_pid)
2794 struct rte_port *port;
2796 port = &ports[slave_pid];
2797 port->slave_flag = 1;
2800 void clear_port_slave_flag(portid_t slave_pid)
2802 struct rte_port *port;
2804 port = &ports[slave_pid];
2805 port->slave_flag = 0;
/*
 * port_is_bonding_slave() - tell whether @slave_pid is a bonding slave,
 * either because the ethdev layer flagged it (RTE_ETH_DEV_BONDED_SLAVE)
 * or because testpmd marked it locally (port->slave_flag).
 * NOTE(review): the return statements are not visible in this gapped
 * listing — presumably returns 1 when the condition holds, 0 otherwise;
 * confirm against the full source.
 */
uint8_t port_is_bonding_slave(portid_t slave_pid)
2810 struct rte_port *port;
2812 port = &ports[slave_pid];
2813 if ((rte_eth_devices[slave_pid].data->dev_flags &
2814 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
/* VLAN IDs 0..31 used to populate the VMDq+DCB pool map in
 * get_eth_dcb_conf() and installed as Rx VLAN filters in
 * init_port_dcb_config(). (Closing brace not visible in this excerpt.) */
const uint16_t vlan_tags[] = {
2820 0, 1, 2, 3, 4, 5, 6, 7,
2821 8, 9, 10, 11, 12, 13, 14, 15,
2822 16, 17, 18, 19, 20, 21, 22, 23,
2823 24, 25, 26, 27, 28, 29, 30, 31
/*
 * get_eth_dcb_conf() - fill @eth_conf for DCB operation on @pid.
 * Two modes: DCB_VT_ENABLED builds a VMDq+DCB config (pools mapped from
 * the vlan_tags[] table); otherwise plain DCB is combined with the
 * device's current RSS hash configuration. Finally sets the DCB
 * capability flags (PFC optional).
 * NOTE(review): gapped listing — braces, the pfc_en parameter line and
 * some else-branches are missing. Lines containing "ð_conf" look like
 * HTML-entity corruption of "&eth_conf" (&eth; -> ð) — verify against
 * the original file; do not take "ð_conf" literally.
 */
2827 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2828 enum dcb_mode_enable dcb_mode,
2829 enum rte_eth_nb_tcs num_tcs,
2834 struct rte_eth_rss_conf rss_conf;
2837 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2838 * given above, and the number of traffic classes available for use.
2840 if (dcb_mode == DCB_VT_ENABLED) {
2841 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2842 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2843 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2844 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2846 /* VMDQ+DCB RX and TX configurations */
2847 vmdq_rx_conf->enable_default_pool = 0;
2848 vmdq_rx_conf->default_pool = 0;
/* Pool count is the complement of the TC count: 4 TCs -> 32 pools,
 * otherwise 16 pools. */
2849 vmdq_rx_conf->nb_queue_pools =
2850 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2851 vmdq_tx_conf->nb_queue_pools =
2852 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* One VLAN-to-pool mapping per pool, cycling through vlan_tags[]. */
2854 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2855 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2856 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2857 vmdq_rx_conf->pool_map[i].pools =
2858 1 << (i % vmdq_rx_conf->nb_queue_pools);
/* Spread user priorities round-robin over the traffic classes. */
2860 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2861 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2862 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2865 /* set DCB mode of RX and TX of multiple queues */
2866 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2867 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
/* Non-VT branch: plain DCB, preserving the current RSS hash config. */
2869 struct rte_eth_dcb_rx_conf *rx_conf =
2870 ð_conf->rx_adv_conf.dcb_rx_conf;
2871 struct rte_eth_dcb_tx_conf *tx_conf =
2872 ð_conf->tx_adv_conf.dcb_tx_conf;
2874 rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2878 rx_conf->nb_tcs = num_tcs;
2879 tx_conf->nb_tcs = num_tcs;
2881 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2882 rx_conf->dcb_tc[i] = i % num_tcs;
2883 tx_conf->dcb_tc[i] = i % num_tcs;
2886 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2887 eth_conf->rx_adv_conf.rss_conf = rss_conf;
2888 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
/* Advertise priority-group support, plus PFC when enabled (the pfc_en
 * condition line is not visible in this excerpt). */
2892 eth_conf->dcb_capability_en =
2893 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2895 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * init_port_dcb_config() - reconfigure port @pid for DCB: build a DCB
 * rte_eth_conf via get_eth_dcb_conf(), re-run rte_eth_dev_configure(),
 * recompute the global nb_rxq/nb_txq for the chosen mode, install VLAN
 * filters for vlan_tags[], and mark the port's dcb_flag.
 * NOTE(review): gapped listing — braces, early returns/error checks and
 * the pfc_en parameter line are missing from this excerpt.
 */
2901 init_port_dcb_config(portid_t pid,
2902 enum dcb_mode_enable dcb_mode,
2903 enum rte_eth_nb_tcs num_tcs,
2906 struct rte_eth_conf port_conf;
2907 struct rte_port *rte_port;
2911 rte_port = &ports[pid];
2913 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2914 /* Enter DCB configuration status */
/* Start from the port's current rxmode/txmode so prior offload settings
 * are preserved. */
2917 port_conf.rxmode = rte_port->dev_conf.rxmode;
2918 port_conf.txmode = rte_port->dev_conf.txmode;
2920 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2921 retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2924 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2926 /* re-configure the device . */
/* NOTE(review): nb_rxq is passed for both Rx and Tx queue counts here —
 * looks intentional in this listing but worth confirming upstream. */
2927 rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
2929 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2931 /* If dev_info.vmdq_pool_base is greater than 0,
2932 * the queue id of vmdq pools is started after pf queues.
2934 if (dcb_mode == DCB_VT_ENABLED &&
2935 rte_port->dev_info.vmdq_pool_base > 0) {
2936 printf("VMDQ_DCB multi-queue mode is nonsensical"
2937 " for port %d.", pid);
2941 /* Assume the ports in testpmd have the same dcb capability
2942 * and has the same number of rxq and txq in dcb mode
2944 if (dcb_mode == DCB_VT_ENABLED) {
2945 if (rte_port->dev_info.max_vfs > 0) {
2946 nb_rxq = rte_port->dev_info.nb_rx_queues;
2947 nb_txq = rte_port->dev_info.nb_tx_queues;
2949 nb_rxq = rte_port->dev_info.max_rx_queues;
2950 nb_txq = rte_port->dev_info.max_tx_queues;
2953 /*if vt is disabled, use all pf queues */
2954 if (rte_port->dev_info.vmdq_pool_base == 0) {
2955 nb_rxq = rte_port->dev_info.max_rx_queues;
2956 nb_txq = rte_port->dev_info.max_tx_queues;
/* VT disabled but VMDq pools present: one queue per traffic class. */
2958 nb_rxq = (queueid_t)num_tcs;
2959 nb_txq = (queueid_t)num_tcs;
2963 rx_free_thresh = 64;
/* Persist the DCB configuration as the port's active dev_conf. */
2965 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2967 rxtx_port_config(rte_port);
2969 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
/* Accept every VLAN id used in the VMDq pool mapping. */
2970 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2971 rx_vft_set(pid, vlan_tags[i], 1);
2973 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2974 map_port_queue_stats_mapping_registers(pid, rte_port);
2976 rte_port->dcb_flag = 1;
/* NOTE(review): interior of init_port() — its header is not visible in
 * this gapped excerpt. Allocates the cache-aligned, zeroed global
 * ports[] array sized for RTE_MAX_ETHPORTS and resets the per-port
 * NUMA-binding tables to NUMA_NO_CONFIG. */
2984 /* Configuration of Ethernet ports. */
2985 ports = rte_zmalloc("testpmd: ports",
2986 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2987 RTE_CACHE_LINE_SIZE);
2988 if (ports == NULL) {
/* Allocation failure is fatal: no port bookkeeping is possible. */
2989 rte_exit(EXIT_FAILURE,
2990 "rte_zmalloc(%d struct rte_port) failed\n",
2994 /* Initialize ports NUMA structures */
2995 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2996 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2997 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
/* NOTE(review): interior of the periodic stats printer — the function
 * header is not visible in this excerpt. Uses ANSI escape sequences
 * (ESC[2J = clear screen, ESC[1;1H = cursor home) then prints NIC stats
 * for every forwarding port. */
3011 const char clr[] = { 27, '[', '2', 'J', '\0' };
3012 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3014 /* Clear screen and move to top left */
3015 printf("%s%s", clr, top_left);
3017 printf("\nPort statistics ====================================");
3018 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3019 nic_stats_display(fwd_ports_ids[i]);
/*
 * signal_handler() - on SIGINT/SIGTERM: tear down optional subsystems
 * (pdump capture, latency stats), then re-raise the signal with the
 * default disposition so the process exits with the conventional
 * signal status.
 * NOTE(review): gapped listing — the pdump uninit call, the
 * force-termination flag assignment and closing braces are not visible
 * here.
 */
3023 signal_handler(int signum)
3025 if (signum == SIGINT || signum == SIGTERM) {
3026 printf("\nSignal %d received, preparing to exit...\n",
3028 #ifdef RTE_LIBRTE_PDUMP
3029 /* uninitialize packet capture framework */
3032 #ifdef RTE_LIBRTE_LATENCY_STATS
3033 rte_latencystats_uninit();
3036 /* Set flag to indicate the force termination. */
3038 /* exit with the expected status */
/* Restore default handler, then deliver the same signal to ourselves
 * so the exit status reflects death-by-signal. */
3039 signal(signum, SIG_DFL);
3040 kill(getpid(), signum);
/*
 * main() - testpmd entry point. Order of operations visible in this
 * excerpt: install signal handlers; init EAL; register the testpmd log
 * type and ethdev event callback; init pdump; enumerate probed ports;
 * parse command-line args; optionally mlockall(); sanity-check option
 * combinations; enable hotplug/device-event monitoring; start all
 * ports and enable promiscuous mode; init metrics/latency/bitrate
 * stats; then either run interactively or start forwarding and loop
 * printing stats until f_quit.
 * NOTE(review): heavily gapped listing — error checks, #endif lines,
 * the interactive prompt() path and the final cleanup are missing.
 */
3045 main(int argc, char** argv)
3052 signal(SIGINT, signal_handler);
3053 signal(SIGTERM, signal_handler);
/* EAL consumes its own argv prefix; failure here is unrecoverable. */
3055 diag = rte_eal_init(argc, argv);
3057 rte_panic("Cannot init EAL\n");
3059 testpmd_logtype = rte_log_register("testpmd");
3060 if (testpmd_logtype < 0)
3061 rte_panic("Cannot register log type");
3062 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3064 ret = register_eth_event_callback();
3066 rte_panic("Cannot register for ethdev events");
3068 #ifdef RTE_LIBRTE_PDUMP
3069 /* initialize packet capture framework */
3070 rte_pdump_init(NULL);
/* Record the ids of all probed ports and count them. */
3074 RTE_ETH_FOREACH_DEV(port_id) {
3075 ports_ids[count] = port_id;
3078 nb_ports = (portid_t) count;
3080 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3082 /* allocate port structures, and init them */
3085 set_def_fwd_config();
3087 rte_panic("Empty set of forwarding logical cores - check the "
3088 "core mask supplied in the command parameters\n");
3090 /* Bitrate/latency stats disabled by default */
3091 #ifdef RTE_LIBRTE_BITRATE
3092 bitrate_enabled = 0;
3094 #ifdef RTE_LIBRTE_LATENCY_STATS
3095 latencystats_enabled = 0;
3098 /* on FreeBSD, mlockall() is disabled by default */
3099 #ifdef RTE_EXEC_ENV_BSDAPP
/* Parse testpmd's own options (after EAL has taken its share). */
3108 launch_args_parse(argc, argv);
/* Lock all current and future pages to avoid page-fault jitter;
 * failure is only a notice, not fatal. */
3110 if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3111 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3115 if (tx_first && interactive)
3116 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
3117 "interactive mode.\n");
3119 if (tx_first && lsc_interrupt) {
3120 printf("Warning: lsc_interrupt needs to be off when "
3121 " using tx_first. Disabling.\n");
3125 if (!nb_rxq && !nb_txq)
3126 printf("Warning: Either rx or tx queues should be non-zero\n");
3128 if (nb_rxq > 1 && nb_rxq > nb_txq)
3129 printf("Warning: nb_rxq=%d enables RSS configuration, "
3130 "but nb_txq=%d will prevent to fully test it.\n",
/* Device hotplug: enable handling, start monitoring, register the
 * event callback (error branches not visible in this excerpt). */
3136 ret = rte_dev_hotplug_handle_enable();
3139 "fail to enable hotplug handling.");
3143 ret = rte_dev_event_monitor_start();
3146 "fail to start device event monitoring.");
3150 ret = rte_dev_event_callback_register(NULL,
3151 eth_dev_event_callback, NULL);
3154 "fail to register device event callback\n");
3159 if (start_port(RTE_PORT_ALL) != 0)
3160 rte_exit(EXIT_FAILURE, "Start ports failed\n");
3162 /* set all ports to promiscuous mode by default */
3163 RTE_ETH_FOREACH_DEV(port_id)
3164 rte_eth_promiscuous_enable(port_id);
3166 /* Init metrics library */
3167 rte_metrics_init(rte_socket_id());
3169 #ifdef RTE_LIBRTE_LATENCY_STATS
3170 if (latencystats_enabled != 0) {
3171 int ret = rte_latencystats_init(1, NULL);
3173 printf("Warning: latencystats init()"
3174 " returned error %d\n", ret);
3175 printf("Latencystats running on lcore %d\n",
3176 latencystats_lcore_id);
3180 /* Setup bitrate stats */
3181 #ifdef RTE_LIBRTE_BITRATE
3182 if (bitrate_enabled != 0) {
3183 bitrate_data = rte_stats_bitrate_create();
3184 if (bitrate_data == NULL)
3185 rte_exit(EXIT_FAILURE,
3186 "Could not allocate bitrate data.\n");
3187 rte_stats_bitrate_reg(bitrate_data);
3191 #ifdef RTE_LIBRTE_CMDLINE
/* Run commands from --cmdline-file before entering interactive mode. */
3192 if (strlen(cmdline_filename) != 0)
3193 cmdline_read_from_file(cmdline_filename);
3195 if (interactive == 1) {
3197 printf("Start automatic packet forwarding\n");
3198 start_packet_forwarding(0);
/* Non-interactive path: start forwarding and optionally print stats
 * every stats_period seconds until the signal handler sets f_quit. */
3210 printf("No commandline core given, start packet forwarding\n");
3211 start_packet_forwarding(tx_first);
3212 if (stats_period != 0) {
3213 uint64_t prev_time = 0, cur_time, diff_time = 0;
3214 uint64_t timer_period;
3216 /* Convert to number of cycles */
3217 timer_period = stats_period * rte_get_timer_hz();
3219 while (f_quit == 0) {
3220 cur_time = rte_get_timer_cycles();
3221 diff_time += cur_time - prev_time;
3223 if (diff_time >= timer_period) {
3225 /* Reset the timer */
3228 /* Sleep to avoid unnecessary checks */
3229 prev_time = cur_time;
/* Block on stdin so the process stays alive until the user hits
 * enter (read() return value checked via rc). */
3234 printf("Press enter to exit\n");
3235 rc = read(0, &c, 1);