1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
16 #include <sys/queue.h>
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
41 #include <rte_interrupts.h>
43 #include <rte_ether.h>
44 #include <rte_ethdev.h>
46 #include <rte_string_fns.h>
47 #ifdef RTE_LIBRTE_IXGBE_PMD
48 #include <rte_pmd_ixgbe.h>
50 #ifdef RTE_LIBRTE_PDUMP
51 #include <rte_pdump.h>
54 #include <rte_metrics.h>
55 #ifdef RTE_LIBRTE_BITRATE
56 #include <rte_bitrate.h>
58 #ifdef RTE_LIBRTE_LATENCY_STATS
59 #include <rte_latencystats.h>
/*
 * NOTE(review): this listing is a truncated extraction — original file line
 * numbers are embedded in each line and many lines (including comment
 * delimiters) are missing. Do not treat comment boundaries here as exact.
 */
/* Global runtime configuration knobs, mostly set from the command line. */
64 uint16_t verbose_level = 0; /**< Silent by default. */
65 int testpmd_logtype; /**< Log type for testpmd logs */
67 /* use master core for command line ? */
68 uint8_t interactive = 0;
69 uint8_t auto_start = 0;
/* File of commands to run at startup (empty string = none). */
71 char cmdline_filename[PATH_MAX] = {0};
74 * NUMA support configuration.
75 * When set, the NUMA support attempts to dispatch the allocation of the
76 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
77 * probed ports among the CPU sockets 0 and 1.
78 * Otherwise, all memory is allocated from CPU socket 0.
80 uint8_t numa_support = 1; /**< numa enabled by default */
83 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
86 uint8_t socket_num = UMA_NO_CONFIG;
89 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
94 * Record the Ethernet address of peer target ports to which packets are
96 * Must be instantiated with the ethernet addresses of peer traffic generator
/* One peer MAC per possible port; see set_def_peer_eth_addrs() below. */
99 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
100 portid_t nb_peer_eth_addrs = 0;
103 * Probed Target Environment.
105 struct rte_port *ports; /**< For all probed ethernet ports. */
106 portid_t nb_ports; /**< Number of probed ethernet ports. */
107 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
108 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
111 * Test Forwarding Configuration.
112 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
113 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
115 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
116 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
117 portid_t nb_cfg_ports; /**< Number of configured ports. */
118 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
120 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
121 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
123 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
124 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
/*
 * Table of available forwarding engines and default forwarding parameters.
 * NOTE(review): the engine table below is truncated in this extraction — most
 * entries and the closing brace are on missing lines.
 */
127 * Forwarding engines.
129 struct fwd_engine * fwd_engines[] = {
138 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
140 &softnic_tm_bypass_engine,
142 #ifdef RTE_LIBRTE_IEEE1588
143 &ieee1588_fwd_engine,
/* Current forwarding configuration; engine defaults to plain IO forwarding. */
148 struct fwd_config cur_fwd_config;
149 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
150 uint32_t retry_enabled;
151 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
152 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
154 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
155 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
156 * specified on command-line. */
157 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
160 * In container, it cannot terminate the process which running with 'stats-period'
161 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
166 * Configuration of packet segments used by the "txonly" processing engine.
168 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
169 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
170 TXONLY_DEF_PACKET_LEN,
172 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
174 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
175 /**< Split policy for packets to TX. */
177 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
178 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
180 /* current configuration is in DCB or not,0 means it is not in DCB mode */
181 uint8_t dcb_config = 0;
183 /* Whether the dcb is in testing status */
184 uint8_t dcb_test = 0;
187 * Configurable number of RX/TX queues.
189 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
190 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
193 * Configurable number of RX/TX ring descriptors.
195 #define RTE_TEST_RX_DESC_DEFAULT 128
196 #define RTE_TEST_TX_DESC_DEFAULT 512
197 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
198 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
/*
 * Per-PMD tunables. RTE_PMD_PARAM_UNSET (-1) means "not set on the command
 * line"; the driver's own default is used in that case.
 */
200 #define RTE_PMD_PARAM_UNSET -1
202 * Configurable values of RX and TX ring threshold registers.
205 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
206 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
207 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
209 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
210 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
214 * Configurable value of RX free threshold.
216 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
219 * Configurable value of RX drop enable.
221 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
224 * Configurable value of TX free threshold.
226 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
229 * Configurable value of TX RS bit threshold.
231 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
234 * Configurable value of TX queue flags.
236 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
239 * Receive Side Scaling (RSS) configuration.
241 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
244 * Port topology configuration
246 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
249 * Avoids to flush all the RX streams before starts forwarding.
251 uint8_t no_flush_rx = 0; /* flush by default */
254 * Flow API isolated mode.
256 uint8_t flow_isolate_all;
259 * Avoids to check link status when starting/stopping a port.
261 uint8_t no_link_check = 0; /* check by default */
264 * Enable link status change notification
266 uint8_t lsc_interrupt = 1; /* enabled by default */
269 * Enable device removal notification.
271 uint8_t rmv_interrupt = 1; /* enabled by default */
274 * Display or mask ether events
275 * Default to all events except VF_MBOX
/* Bitmask of ethdev event types to print; one bit per rte_eth_event_type. */
277 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
278 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
279 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
280 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
281 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
282 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
/*
 * Device-level configuration: NIC bypass, latency stats, default RX mode,
 * flow-director config, queue-stats mappings, bitrate/GRO/GSO state, plus
 * forward declarations of file-local helpers.
 */
285 * NIC bypass mode configuration options.
288 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
289 /* The NIC bypass watchdog timeout. */
290 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
294 #ifdef RTE_LIBRTE_LATENCY_STATS
297 * Set when latency stats is enabled in the commandline
299 uint8_t latencystats_enabled;
302 * Lcore ID to serive latency statistics.
/* -1 wraps to the lcoreid_t max, i.e. "not assigned" — TODO confirm intent. */
304 lcoreid_t latencystats_lcore_id = -1;
309 * Ethernet device configuration.
311 struct rte_eth_rxmode rx_mode = {
312 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
314 .header_split = 0, /**< Header Split disabled. */
315 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
316 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
317 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
318 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
319 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
320 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
321 .hw_timestamp = 0, /**< HW timestamp enabled. */
/* Flow Director disabled by default; all-ones masks = match exact tuples. */
324 struct rte_fdir_conf fdir_conf = {
325 .mode = RTE_FDIR_MODE_NONE,
326 .pballoc = RTE_FDIR_PBALLOC_64K,
327 .status = RTE_FDIR_REPORT_STATUS,
329 .vlan_tci_mask = 0x0,
331 .src_ip = 0xFFFFFFFF,
332 .dst_ip = 0xFFFFFFFF,
335 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
336 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
338 .src_port_mask = 0xFFFF,
339 .dst_port_mask = 0xFFFF,
340 .mac_addr_byte_mask = 0xFF,
341 .tunnel_type_mask = 1,
342 .tunnel_id_mask = 0xFFFFFFFF,
347 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
349 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
350 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
352 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
353 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
355 uint16_t nb_tx_queue_stats_mappings = 0;
356 uint16_t nb_rx_queue_stats_mappings = 0;
359 * Display zero values by default for xstats
361 uint8_t xstats_hide_zero;
/* Sockets discovered so far; filled by set_default_fwd_lcores_config(). */
363 unsigned int num_sockets = 0;
364 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
366 #ifdef RTE_LIBRTE_BITRATE
367 /* Bitrate statistics */
368 struct rte_stats_bitrates *bitrate_data;
369 lcoreid_t bitrate_lcore_id;
370 uint8_t bitrate_enabled;
373 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
374 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
376 /* Forward function declarations */
377 static void map_port_queue_stats_mapping_registers(portid_t pi,
378 struct rte_port *port);
379 static void check_all_ports_link_status(uint32_t port_mask);
380 static int eth_event_callback(portid_t port_id,
381 enum rte_eth_event_type type,
382 void *param, void *ret_param);
385 * Check if all the ports are started.
386 * If yes, return positive value. If not, return zero.
388 static int all_ports_started(void);
390 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
391 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
394 * Helper function to check if socket is already discovered.
395 * If yes, return positive value. If not, return zero.
/*
 * NOTE(review): return type line and return statements are on missing lines;
 * the visible loop scans socket_ids[0..num_sockets). Presumably returns
 * early when socket_id is found — confirm against the full source.
 */
398 new_socket_id(unsigned int socket_id)
402 for (i = 0; i < num_sockets; i++) {
403 if (socket_ids[i] == socket_id)
410 * Setup default configuration.
/*
 * Walk every possible lcore: record each newly seen CPU socket in
 * socket_ids[] (aborting if RTE_MAX_NUMA_NODES is exceeded), then collect
 * every enabled non-master lcore id into fwd_lcores_cpuids[] and set
 * nb_lcores / nb_cfg_lcores accordingly.
 */
413 set_default_fwd_lcores_config(void)
417 unsigned int sock_num;
420 for (i = 0; i < RTE_MAX_LCORE; i++) {
421 sock_num = rte_lcore_to_socket_id(i);
/* new_socket_id() is non-zero for a socket not yet in socket_ids[]. */
422 if (new_socket_id(sock_num)) {
423 if (num_sockets >= RTE_MAX_NUMA_NODES) {
424 rte_exit(EXIT_FAILURE,
425 "Total sockets greater than %u\n",
428 socket_ids[num_sockets++] = sock_num;
/* Skip disabled lcores and the master lcore (it runs the CLI). */
430 if (!rte_lcore_is_enabled(i))
432 if (i == rte_get_master_lcore())
434 fwd_lcores_cpuids[nb_lc++] = i;
436 nb_lcores = (lcoreid_t) nb_lc;
437 nb_cfg_lcores = nb_lcores;
/*
 * Give every possible peer a default locally-administered MAC address whose
 * last byte is the port index (bytes 1..4 left as-is from static init).
 */
442 set_def_peer_eth_addrs(void)
446 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
447 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
448 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default port config: forward on every probed ethdev port, in probe order.
 */
453 set_default_fwd_ports_config(void)
458 RTE_ETH_FOREACH_DEV(pt_id)
459 fwd_ports_ids[i++] = pt_id;
461 nb_cfg_ports = nb_ports;
462 nb_fwd_ports = nb_ports;
/* Install the complete default forwarding setup: lcores, peer MACs, ports. */
466 set_def_fwd_config(void)
468 set_default_fwd_lcores_config();
469 set_def_peer_eth_addrs();
470 set_default_fwd_ports_config();
474 * Configuration initialisation done once at init time.
/*
 * Create an mbuf pool of nb_mbuf buffers (each with mbuf_seg_size data room)
 * on the given NUMA socket. Two paths are visible: an anonymous-memory path
 * using rte_mempool_create_empty() + rte_mempool_populate_anon(), and the
 * standard rte_pktmbuf_pool_create() path. The branch selecting between them
 * is on missing lines — presumably gated by the mp_anon command-line flag.
 * Exits the process on allocation failure.
 */
477 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
478 unsigned int socket_id)
480 char pool_name[RTE_MEMPOOL_NAMESIZE];
481 struct rte_mempool *rte_mp = NULL;
/* Total element size = mbuf header + data room. */
484 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
485 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
488 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
489 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
492 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
493 mb_size, (unsigned) mb_mempool_cache,
494 sizeof(struct rte_pktmbuf_pool_private),
/* populate_anon() returns the number of objects populated; 0 is failure. */
499 if (rte_mempool_populate_anon(rte_mp) == 0) {
500 rte_mempool_free(rte_mp);
504 rte_pktmbuf_pool_init(rte_mp, NULL);
505 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
507 /* wrapper to rte_mempool_create() */
508 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
509 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
513 if (rte_mp == NULL) {
514 rte_exit(EXIT_FAILURE,
515 "Creation of mbuf pool for socket %u failed: %s\n",
516 socket_id, rte_strerror(rte_errno));
517 } else if (verbose_level > 0) {
518 rte_mempool_dump(stdout, rte_mp);
523 * Check given socket id is valid or not with NUMA mode,
524 * if valid, return 0, else return -1
527 check_socket_id(const unsigned int socket_id)
529 static int warning_once = 0;
/* Unknown socket under NUMA: warn once, then (per header comment) fail. */
531 if (new_socket_id(socket_id)) {
532 if (!warning_once && numa_support)
533 printf("Warning: NUMA should be configured manually by"
534 " using --port-numa-config and"
535 " --ring-numa-config parameters along with"
/*
 * NOTE(review): this is the interior of the one-time init function (its
 * signature is on a missing line — presumably init_config()). It allocates
 * per-lcore contexts, records per-socket port counts, sizes and creates the
 * mbuf pools, assigns a pool and a GSO context to each forwarding lcore,
 * builds the forwarding streams, and creates one GRO context per lcore.
 */
547 struct rte_port *port;
548 struct rte_mempool *mbp;
549 unsigned int nb_mbuf_per_pool;
551 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
552 struct rte_gro_param gro_param;
555 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
/* NUMA_NO_CONFIG marks ports/rings with no explicit socket binding. */
558 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
559 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
560 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
563 /* Configuration of logical cores. */
564 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
565 sizeof(struct fwd_lcore *) * nb_lcores,
566 RTE_CACHE_LINE_SIZE);
567 if (fwd_lcores == NULL) {
568 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
569 "failed\n", nb_lcores);
571 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
572 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
573 sizeof(struct fwd_lcore),
574 RTE_CACHE_LINE_SIZE);
575 if (fwd_lcores[lc_id] == NULL) {
576 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
579 fwd_lcores[lc_id]->cpuid_idx = lc_id;
/* Count ports per socket: explicit --port-numa-config wins over probing. */
582 RTE_ETH_FOREACH_DEV(pid) {
584 rte_eth_dev_info_get(pid, &port->dev_info);
587 if (port_numa[pid] != NUMA_NO_CONFIG)
588 port_per_socket[port_numa[pid]]++;
590 uint32_t socket_id = rte_eth_dev_socket_id(pid);
592 /* if socket_id is invalid, set to 0 */
593 if (check_socket_id(socket_id) < 0)
595 port_per_socket[socket_id]++;
599 /* set flag to initialize port/queue */
600 port->need_reconfig = 1;
601 port->need_reconfig_queues = 1;
605 * Create pools of mbuf.
606 * If NUMA support is disabled, create a single pool of mbuf in
607 * socket 0 memory by default.
608 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
610 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
611 * nb_txd can be configured at run time.
613 if (param_total_num_mbufs)
614 nb_mbuf_per_pool = param_total_num_mbufs;
/* Sized for worst case: max descriptors + caches + one burst, per port. */
616 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
617 (nb_lcores * mb_mempool_cache) +
618 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
619 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
625 for (i = 0; i < num_sockets; i++)
626 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
/* UMA: single pool on socket 0 unless --socket-num was given. */
629 if (socket_num == UMA_NO_CONFIG)
630 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
632 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
638 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
639 DEV_TX_OFFLOAD_GRE_TNL_TSO;
641 * Records which Mbuf pool to use by each logical core, if needed.
643 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
644 mbp = mbuf_pool_find(
645 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
/* Fall back to the socket-0 pool when the lcore's socket has none. */
648 mbp = mbuf_pool_find(0);
649 fwd_lcores[lc_id]->mbp = mbp;
650 /* initialize GSO context */
651 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
652 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
653 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
654 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
656 fwd_lcores[lc_id]->gso_ctx.flag = 0;
659 /* Configuration of packet forwarding streams. */
660 if (init_fwd_streams() < 0)
661 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
665 /* create a gro context for each lcore */
666 gro_param.gro_types = RTE_GRO_TCP_IPV4;
667 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
668 gro_param.max_item_per_flow = MAX_PKT_BURST;
669 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
670 gro_param.socket_id = rte_lcore_to_socket_id(
671 fwd_lcores_cpuids[lc_id]);
672 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
673 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
674 rte_exit(EXIT_FAILURE,
675 "rte_gro_ctx_create() failed\n");
/*
 * Refresh one port's device info and mark it (and its queues) for
 * reconfiguration on the given socket — used after hotplug/attach.
 */
682 reconfig(portid_t new_port_id, unsigned socket_id)
684 struct rte_port *port;
686 /* Reconfiguration of Ethernet ports. */
687 port = &ports[new_port_id];
688 rte_eth_dev_info_get(new_port_id, &port->dev_info);
690 /* set flag to initialize port/queue */
691 port->need_reconfig = 1;
692 port->need_reconfig_queues = 1;
693 port->socket_id = socket_id;
/*
 * (Re)build the fwd_streams array: validate nb_rxq/nb_txq against each
 * port's limits, assign per-port socket ids, then allocate nb_ports *
 * max(nb_rxq, nb_txq) zeroed stream descriptors. Frees any previous array
 * when the stream count changes. Returns negative on validation failure
 * (return statements are on missing lines).
 */
700 init_fwd_streams(void)
703 struct rte_port *port;
704 streamid_t sm_id, nb_fwd_streams_new;
707 /* set socket id according to numa or not */
708 RTE_ETH_FOREACH_DEV(pid) {
710 if (nb_rxq > port->dev_info.max_rx_queues) {
711 printf("Fail: nb_rxq(%d) is greater than "
712 "max_rx_queues(%d)\n", nb_rxq,
713 port->dev_info.max_rx_queues);
716 if (nb_txq > port->dev_info.max_tx_queues) {
717 printf("Fail: nb_txq(%d) is greater than "
718 "max_tx_queues(%d)\n", nb_txq,
719 port->dev_info.max_tx_queues);
/* Explicit --port-numa-config overrides the probed device socket. */
723 if (port_numa[pid] != NUMA_NO_CONFIG)
724 port->socket_id = port_numa[pid];
726 port->socket_id = rte_eth_dev_socket_id(pid);
728 /* if socket_id is invalid, set to 0 */
729 if (check_socket_id(port->socket_id) < 0)
734 if (socket_num == UMA_NO_CONFIG)
737 port->socket_id = socket_num;
/* One stream per queue; rx-only or tx-only setups use the larger count. */
741 q = RTE_MAX(nb_rxq, nb_txq);
743 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
746 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
747 if (nb_fwd_streams_new == nb_fwd_streams)
750 if (fwd_streams != NULL) {
751 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
752 if (fwd_streams[sm_id] == NULL)
754 rte_free(fwd_streams[sm_id]);
755 fwd_streams[sm_id] = NULL;
757 rte_free(fwd_streams);
762 nb_fwd_streams = nb_fwd_streams_new;
763 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
764 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
765 if (fwd_streams == NULL)
766 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
767 "failed\n", nb_fwd_streams);
769 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
770 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
771 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
772 if (fwd_streams[sm_id] == NULL)
773 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
780 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a histogram summary of burst sizes: the total number of bursts and
 * the percentages of the two most frequent burst sizes, folding the rest
 * into "others". rx_tx labels the direction ("RX"/"TX").
 * NOTE(review): total_burst's zero-initialization and parts of the top-2
 * bookkeeping are on missing lines.
 */
782 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
784 unsigned int total_burst;
785 unsigned int nb_burst;
786 unsigned int burst_stats[3];
787 uint16_t pktnb_stats[3];
789 int burst_percent[3];
792 * First compute the total number of packet bursts and the
793 * two highest numbers of bursts of the same number of packets.
796 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
797 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
798 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
799 nb_burst = pbs->pkt_burst_spread[nb_pkt];
802 total_burst += nb_burst;
/* Maintain the top-2: demote the current leader before replacing it. */
803 if (nb_burst > burst_stats[0]) {
804 burst_stats[1] = burst_stats[0];
805 pktnb_stats[1] = pktnb_stats[0];
806 burst_stats[0] = nb_burst;
807 pktnb_stats[0] = nb_pkt;
810 if (total_burst == 0)
812 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
813 printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
814 burst_percent[0], (int) pktnb_stats[0]);
815 if (burst_stats[0] == total_burst) {
819 if (burst_stats[0] + burst_stats[1] == total_burst) {
820 printf(" + %d%% of %d pkts]\n",
821 100 - burst_percent[0], pktnb_stats[1]);
824 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
825 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
826 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
827 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
830 printf(" + %d%% of %d pkts + %d%% of others]\n",
831 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
833 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print per-port forwarding statistics. Two report layouts: a compact one
 * when no queue-stats mapping is enabled, and a per-stats-register layout
 * otherwise. Also prints csum-engine error counters and, when compiled in,
 * per-stream burst histograms.
 */
836 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
838 struct rte_port *port;
841 static const char *fwd_stats_border = "----------------------";
843 port = &ports[port_id];
844 printf("\n  %s Forward statistics for port %-2d %s\n",
845 fwd_stats_border, port_id, fwd_stats_border);
847 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
848 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
850 stats->ipackets, stats->imissed,
851 (uint64_t) (stats->ipackets + stats->imissed));
/* Checksum counters only make sense under the csum forwarding engine. */
853 if (cur_fwd_eng == &csum_fwd_engine)
854 printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
855 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
856 if ((stats->ierrors + stats->rx_nombuf) > 0) {
857 printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
858 printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
861 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
863 stats->opackets, port->tx_dropped,
864 (uint64_t) (stats->opackets + port->tx_dropped));
/* Same counters, right-aligned layout for the queue-mapped case. */
867 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
869 stats->ipackets, stats->imissed,
870 (uint64_t) (stats->ipackets + stats->imissed));
872 if (cur_fwd_eng == &csum_fwd_engine)
873 printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
874 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
875 if ((stats->ierrors + stats->rx_nombuf) > 0) {
876 printf("  RX-error:%"PRIu64"\n", stats->ierrors);
877 printf("  RX-nombufs:             %14"PRIu64"\n",
881 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
883 stats->opackets, port->tx_dropped,
884 (uint64_t) (stats->opackets + port->tx_dropped));
887 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
889 pkt_burst_stats_display("RX",
890 &port->rx_stream->rx_burst_stats);
892 pkt_burst_stats_display("TX",
893 &port->tx_stream->tx_burst_stats);
896 if (port->rx_queue_stats_mapping_enabled) {
898 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
899 printf("  Stats reg %2d RX-packets:%14"PRIu64
900 "     RX-errors:%14"PRIu64
901 "    RX-bytes:%14"PRIu64"\n",
902 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
906 if (port->tx_queue_stats_mapping_enabled) {
907 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
908 printf("  Stats reg %2d TX-packets:%14"PRIu64
909 "                                 TX-bytes:%14"PRIu64"\n",
910 i, stats->q_opackets[i], stats->q_obytes[i]);
914 printf("  %s--------------------------------%s\n",
915 fwd_stats_border, fwd_stats_border);
/*
 * Print per-stream counters (RX/TX packets, drops, and csum errors under
 * the csum engine). Streams with all-zero counters are skipped — the early
 * return after the zero check is on a missing line.
 */
919 fwd_stream_stats_display(streamid_t stream_id)
921 struct fwd_stream *fs;
922 static const char *fwd_top_stats_border = "-------";
924 fs = fwd_streams[stream_id];
925 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
926 (fs->fwd_dropped == 0))
928 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
929 "TX Port=%2d/Queue=%2d %s\n",
930 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
931 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
932 printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
933 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
935 /* if checksum mode */
936 if (cur_fwd_eng == &csum_fwd_engine) {
937 printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
938 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
941 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
942 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
943 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain any packets sitting in the RX queues of all forwarding ports before
 * forwarding starts, so counters begin from a clean state. Two passes
 * (j = 0,1) with a 10 ms pause between them; each queue is drained until
 * rte_eth_rx_burst() returns 0 or a 1-second TSC timeout expires.
 */
948 flush_fwd_rx_queues(void)
950 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
957 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
958 uint64_t timer_period;
960 /* convert to number of cycles */
961 timer_period = rte_get_timer_hz(); /* 1 second timeout */
963 for (j = 0; j < 2; j++) {
964 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
965 for (rxq = 0; rxq < nb_rxq; rxq++) {
966 port_id = fwd_ports_ids[rxp];
968 * testpmd can stuck in the below do while loop
969 * if rte_eth_rx_burst() always returns nonzero
970 * packets. So timer is added to exit this loop
971 * after 1sec timer expiry.
973 prev_tsc = rte_rdtsc();
975 nb_rx = rte_eth_rx_burst(port_id, rxq,
976 pkts_burst, MAX_PKT_BURST);
977 for (i = 0; i < nb_rx; i++)
978 rte_pktmbuf_free(pkts_burst[i]);
980 cur_tsc = rte_rdtsc();
981 diff_tsc = cur_tsc - prev_tsc;
982 timer_tsc += diff_tsc;
983 } while ((nb_rx > 0) &&
984 (timer_tsc < timer_period));
988 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main per-lcore forwarding loop: repeatedly apply pkt_fwd to every stream
 * assigned to this lcore until fc->stopped is set. Optionally performs the
 * periodic bitrate calculation and latency-stats update when this lcore is
 * the one designated for those tasks.
 */
993 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
995 struct fwd_stream **fsm;
998 #ifdef RTE_LIBRTE_BITRATE
999 uint64_t tics_per_1sec;
1000 uint64_t tics_datum;
1001 uint64_t tics_current;
1002 uint8_t idx_port, cnt_ports;
1004 cnt_ports = rte_eth_dev_count();
1005 tics_datum = rte_rdtsc();
1006 tics_per_1sec = rte_get_timer_hz();
/* fc->stream_idx / fc->stream_nb select this lcore's slice of fwd_streams. */
1008 fsm = &fwd_streams[fc->stream_idx];
1009 nb_fs = fc->stream_nb;
1011 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1012 (*pkt_fwd)(fsm[sm_id]);
1013 #ifdef RTE_LIBRTE_BITRATE
1014 if (bitrate_enabled != 0 &&
1015 bitrate_lcore_id == rte_lcore_id()) {
1016 tics_current = rte_rdtsc();
/* Recompute bitrates roughly once per second on the designated lcore. */
1017 if (tics_current - tics_datum >= tics_per_1sec) {
1018 /* Periodic bitrate calculation */
1020 idx_port < cnt_ports;
1022 rte_stats_bitrate_calc(bitrate_data,
1024 tics_datum = tics_current;
1028 #ifdef RTE_LIBRTE_LATENCY_STATS
1029 if (latencystats_enabled != 0 &&
1030 latencystats_lcore_id == rte_lcore_id())
1031 rte_latencystats_update();
1034 } while (! fc->stopped);
/*
 * lcore entry point: run the currently configured engine's packet_fwd loop
 * on this lcore's forwarding context (fwd_arg is a struct fwd_lcore *).
 */
1038 start_pkt_forward_on_core(void *fwd_arg)
1040 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1041 cur_fwd_config.fwd_eng->packet_fwd);
1046 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1047 * Used to start communication flows in network loopback test configurations.
/*
 * A copy of the lcore context with stopped=1 makes run_pkt_fwd_on_lcore's
 * do/while loop execute exactly one iteration — i.e. one TX burst — without
 * disturbing the real context's state.
 */
1050 run_one_txonly_burst_on_core(void *fwd_arg)
1052 struct fwd_lcore *fwd_lc;
1053 struct fwd_lcore tmp_lcore;
1055 fwd_lc = (struct fwd_lcore *) fwd_arg;
1056 tmp_lcore = *fwd_lc;
1057 tmp_lcore.stopped = 1;
1058 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1063 * Launch packet forwarding:
1064 *  - Setup per-port forwarding context.
1065 *  - launch logical cores with their forwarding configuration.
1068 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1070 port_fwd_begin_t port_fwd_begin;
/* Run the engine's optional per-port begin hook before launching lcores. */
1075 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1076 if (port_fwd_begin != NULL) {
1077 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1078 (*port_fwd_begin)(fwd_ports_ids[i]);
1080 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1081 lc_id = fwd_lcores_cpuids[i];
/* In interactive mode the current (master) lcore stays on the CLI. */
1082 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1083 fwd_lcores[i]->stopped = 0;
1084 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1085 fwd_lcores[i], lc_id);
1087 printf("launch lcore %u failed - diag=%d\n",
1094 * Launch packet forwarding configuration.
/*
 * Validate the current engine against nb_rxq/nb_txq, check that all ports
 * are started and forwarding is not already running, rebuild streams,
 * snapshot port stats and zero per-stream counters, optionally send
 * with_tx_first warm-up TXONLY bursts, then launch the forwarding lcores.
 */
1097 start_packet_forwarding(int with_tx_first)
1099 port_fwd_begin_t port_fwd_begin;
1100 port_fwd_end_t port_fwd_end;
1101 struct rte_port *port;
/* rxonly needs RX queues, txonly needs TX queues, everything else needs both. */
1106 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1107 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1109 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1110 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1112 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1113 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1114 (!nb_rxq || !nb_txq))
1115 rte_exit(EXIT_FAILURE,
1116 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1117 cur_fwd_eng->fwd_mode_name);
1119 if (all_ports_started() == 0) {
1120 printf("Not all ports were started\n");
1123 if (test_done == 0) {
1124 printf("Packet forwarding already started\n");
1128 if (init_fwd_streams() < 0) {
1129 printf("Fail from init_fwd_streams()\n");
/* DCB mode: every forwarding port must be DCB-configured, and >1 core. */
1134 for (i = 0; i < nb_fwd_ports; i++) {
1135 pt_id = fwd_ports_ids[i];
1136 port = &ports[pt_id];
1137 if (!port->dcb_flag) {
1138 printf("In DCB mode, all forwarding ports must "
1139 "be configured in this mode.\n");
1143 if (nb_fwd_lcores == 1) {
1144 printf("In DCB mode,the nb forwarding cores "
1145 "should be larger than 1.\n");
1152 flush_fwd_rx_queues();
1155 pkt_fwd_config_display(&cur_fwd_config);
1156 rxtx_config_display();
/* Snapshot current HW stats so later reports show only the delta. */
1158 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1159 pt_id = fwd_ports_ids[i];
1160 port = &ports[pt_id];
1161 rte_eth_stats_get(pt_id, &port->stats);
1162 port->tx_dropped = 0;
1164 map_port_queue_stats_mapping_registers(pt_id, port);
1166 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1167 fwd_streams[sm_id]->rx_packets = 0;
1168 fwd_streams[sm_id]->tx_packets = 0;
1169 fwd_streams[sm_id]->fwd_dropped = 0;
1170 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1171 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1173 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1174 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1175 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1176 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1177 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1179 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1180 fwd_streams[sm_id]->core_cycles = 0;
/* Optional warm-up: send with_tx_first TXONLY bursts before real forwarding. */
1183 if (with_tx_first) {
1184 port_fwd_begin = tx_only_engine.port_fwd_begin;
1185 if (port_fwd_begin != NULL) {
1186 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1187 (*port_fwd_begin)(fwd_ports_ids[i]);
1189 while (with_tx_first--) {
1190 launch_packet_forwarding(
1191 run_one_txonly_burst_on_core);
1192 rte_eal_mp_wait_lcore();
1194 port_fwd_end = tx_only_engine.port_fwd_end;
1195 if (port_fwd_end != NULL) {
1196 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1197 (*port_fwd_end)(fwd_ports_ids[i]);
1200 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop all forwarding lcores, wait for them to finish, run the engine's
 * per-port end hooks, aggregate per-stream counters back into their ports,
 * compute per-port deltas against the snapshot taken at start, and print
 * per-port plus accumulated statistics.
 */
1204 stop_packet_forwarding(void)
1206 struct rte_eth_stats stats;
1207 struct rte_port *port;
1208 port_fwd_end_t  port_fwd_end;
1213 uint64_t total_recv;
1214 uint64_t total_xmit;
1215 uint64_t total_rx_dropped;
1216 uint64_t total_tx_dropped;
1217 uint64_t total_rx_nombuf;
1218 uint64_t tx_dropped;
1219 uint64_t rx_bad_ip_csum;
1220 uint64_t rx_bad_l4_csum;
1221 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1222 uint64_t fwd_cycles;
1225 static const char *acc_stats_border = "+++++++++++++++";
1228 printf("Packet forwarding not started\n");
/* Signal every forwarding lcore to exit its do/while loop, then join. */
1231 printf("Telling cores to stop...");
1232 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1233 fwd_lcores[lc_id]->stopped = 1;
1234 printf("\nWaiting for lcores to finish...\n");
1235 rte_eal_mp_wait_lcore();
1236 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1237 if (port_fwd_end != NULL) {
1238 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1239 pt_id = fwd_ports_ids[i];
1240 (*port_fwd_end)(pt_id);
1243 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Fold each stream's counters into its RX/TX port totals. */
1246 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1247 if (cur_fwd_config.nb_fwd_streams >
1248 cur_fwd_config.nb_fwd_ports) {
1249 fwd_stream_stats_display(sm_id);
1250 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1251 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1253 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1255 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1258 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1259 tx_dropped = (uint64_t) (tx_dropped +
1260 fwd_streams[sm_id]->fwd_dropped);
1261 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1264 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1265 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1266 fwd_streams[sm_id]->rx_bad_ip_csum);
1267 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1271 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1272 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1273 fwd_streams[sm_id]->rx_bad_l4_csum);
1274 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1277 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1278 fwd_cycles = (uint64_t) (fwd_cycles +
1279 fwd_streams[sm_id]->core_cycles);
1284 total_rx_dropped = 0;
1285 total_tx_dropped = 0;
1286 total_rx_nombuf  = 0;
/* Subtract the start-time snapshot so only this run's traffic is reported. */
1287 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1288 pt_id = fwd_ports_ids[i];
1290 port = &ports[pt_id];
1291 rte_eth_stats_get(pt_id, &stats);
1292 stats.ipackets -= port->stats.ipackets;
1293 port->stats.ipackets = 0;
1294 stats.opackets -= port->stats.opackets;
1295 port->stats.opackets = 0;
1296 stats.ibytes   -= port->stats.ibytes;
1297 port->stats.ibytes = 0;
1298 stats.obytes   -= port->stats.obytes;
1299 port->stats.obytes = 0;
1300 stats.imissed  -= port->stats.imissed;
1301 port->stats.imissed = 0;
1302 stats.oerrors  -= port->stats.oerrors;
1303 port->stats.oerrors = 0;
1304 stats.rx_nombuf -= port->stats.rx_nombuf;
1305 port->stats.rx_nombuf = 0;
1307 total_recv += stats.ipackets;
1308 total_xmit += stats.opackets;
1309 total_rx_dropped += stats.imissed;
1310 total_tx_dropped += port->tx_dropped;
1311 total_rx_nombuf  += stats.rx_nombuf;
1313 fwd_port_stats_display(pt_id, &stats);
1316 printf("\n  %s Accumulated forward statistics for all ports"
1318 acc_stats_border, acc_stats_border);
1319 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1321 "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1323 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1324 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1325 if (total_rx_nombuf > 0)
1326 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1327 printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1329 acc_stats_border, acc_stats_border);
1330 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Average cycles per received packet; division guard is on missing lines. */
1332 printf("\n  CPU cycles/packet=%u (total cycles="
1333 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1334 (unsigned int)(fwd_cycles / total_recv),
1335 fwd_cycles, total_recv);
1337 printf("\nDone.\n");
/*
 * Bring the link of port 'pid' up via rte_eth_dev_set_link_up();
 * only reports failure (negative return) with a printf, no error code
 * is propagated to the caller.
 */
1342 dev_set_link_up(portid_t pid)
1344 	if (rte_eth_dev_set_link_up(pid) < 0)
1345 		printf("\nSet link up fail.\n");
/*
 * Bring the link of port 'pid' down via rte_eth_dev_set_link_down();
 * mirror of dev_set_link_up() — failure is only printed, not returned.
 */
1349 dev_set_link_down(portid_t pid)
1351 	if (rte_eth_dev_set_link_down(pid) < 0)
1352 		printf("\nSet link down fail.\n");
/*
 * Return whether every non-bonding-slave port is in RTE_PORT_STARTED
 * state. Ports flagged as bonding slaves (slave_flag != 0) are skipped,
 * since they are managed through their bonded master.
 * (Return statements are elided in this listing; the loop body bails
 * out when a stopped non-slave port is found.)
 */
1356 all_ports_started(void)
1359 	struct rte_port *port;
1361 	RTE_ETH_FOREACH_DEV(pi) {
1363 		/* Check if there is a port which is not started */
1364 		if ((port->port_status != RTE_PORT_STARTED) &&
1365 			(port->slave_flag == 0))
1369 	/* No port is not started */
/*
 * Return whether every non-bonding-slave port is in RTE_PORT_STOPPED
 * state. Symmetric to all_ports_started(); slave ports are ignored.
 */
1374 all_ports_stopped(void)
1377 	struct rte_port *port;
1379 	RTE_ETH_FOREACH_DEV(pi) {
1381 		if ((port->port_status != RTE_PORT_STOPPED) &&
1382 			(port->slave_flag == 0))
/*
 * Return whether a single port is in RTE_PORT_STARTED state.
 * An invalid port id (checked with a warning) is treated as not started.
 */
1390 port_is_started(portid_t port_id)
1392 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1395 	if (ports[port_id].port_status != RTE_PORT_STARTED)
/*
 * Return whether a single port is in RTE_PORT_CLOSED state.
 * An invalid port id (checked with a warning) is treated as not closed.
 */
1402 port_is_closed(portid_t port_id)
1404 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1407 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * Start one port, or all ports when pid == RTE_PORT_ALL.
 *
 * For each selected port the sequence is:
 *  1. Atomically move port_status STOPPED -> HANDLING (skip port if it
 *     is not currently stopped).
 *  2. If need_reconfig is set: optionally apply flow isolation, then
 *     rte_eth_dev_configure(). On failure the status is rolled back to
 *     STOPPED and need_reconfig is re-armed for the next attempt.
 *  3. If need_reconfig_queues is set: set up all TX then RX queues,
 *     honoring per-ring NUMA overrides (txring_numa/rxring_numa) and
 *     falling back to the port's socket otherwise. RX setup requires a
 *     mempool on the chosen socket. Failures roll back as in step 2.
 *  4. Register a callback for every rte_eth_event_type.
 *  5. rte_eth_dev_start(); on success move HANDLING -> STARTED and
 *     print the port MAC address.
 *
 * need_check_link_status: -1 = no eligible port seen, 0 = a port was
 * seen but none started (user is told to stop ports first), 1 = at
 * least one port started, so link status is verified at the end
 * (unless --no-link-check was given).
 */
1414 start_port(portid_t pid)
1416 	int diag, need_check_link_status = -1;
1419 	struct rte_port *port;
1420 	struct ether_addr mac_addr;
1421 	enum rte_eth_event_type event_type;
1423 	if (port_id_is_invalid(pid, ENABLED_WARN))
1428 	RTE_ETH_FOREACH_DEV(pi) {
1429 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1432 		need_check_link_status = 0;
		/* Claim the port: only a STOPPED port may be started. */
1434 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1435 						 RTE_PORT_HANDLING) == 0) {
1436 			printf("Port %d is now not stopped\n", pi);
1440 		if (port->need_reconfig > 0) {
1441 			port->need_reconfig = 0;
1443 			if (flow_isolate_all) {
1444 				int ret = port_flow_isolate(pi, 1);
1446 					printf("Failed to apply isolated"
1447 					       " mode on port %d\n", pi);
1452 			printf("Configuring Port %d (socket %u)\n", pi,
1454 			/* configure port */
1455 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1458 				if (rte_atomic16_cmpset(&(port->port_status),
1459 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1460 					printf("Port %d can not be set back "
1461 							"to stopped\n", pi);
1462 				printf("Fail to configure port %d\n", pi);
1463 				/* try to reconfigure port next time */
1464 				port->need_reconfig = 1;
1468 		if (port->need_reconfig_queues > 0) {
1469 			port->need_reconfig_queues = 0;
1470 			/* setup tx queues */
1471 			for (qi = 0; qi < nb_txq; qi++) {
				/* Per-ring NUMA override wins over port socket. */
1472 				if ((numa_support) &&
1473 					(txring_numa[pi] != NUMA_NO_CONFIG))
1474 					diag = rte_eth_tx_queue_setup(pi, qi,
1475 						nb_txd,txring_numa[pi],
1478 					diag = rte_eth_tx_queue_setup(pi, qi,
1479 						nb_txd,port->socket_id,
1485 				/* Fail to setup tx queue, return */
1486 				if (rte_atomic16_cmpset(&(port->port_status),
1488 							RTE_PORT_STOPPED) == 0)
1489 					printf("Port %d can not be set back "
1490 							"to stopped\n", pi);
1491 				printf("Fail to configure port %d tx queues\n", pi);
1492 				/* try to reconfigure queues next time */
1493 				port->need_reconfig_queues = 1;
1496 			/* setup rx queues */
1497 			for (qi = 0; qi < nb_rxq; qi++) {
1498 				if ((numa_support) &&
1499 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					/* RX needs a mempool on the target socket. */
1500 					struct rte_mempool * mp =
1501 						mbuf_pool_find(rxring_numa[pi]);
1503 						printf("Failed to setup RX queue:"
1504 							"No mempool allocation"
1505 							" on the socket %d\n",
1510 					diag = rte_eth_rx_queue_setup(pi, qi,
1511 					     nb_rxd,rxring_numa[pi],
1512 					     &(port->rx_conf),mp);
1514 					struct rte_mempool *mp =
1515 						mbuf_pool_find(port->socket_id);
1517 						printf("Failed to setup RX queue:"
1518 							"No mempool allocation"
1519 							" on the socket %d\n",
1523 					diag = rte_eth_rx_queue_setup(pi, qi,
1524 					     nb_rxd,port->socket_id,
1525 					     &(port->rx_conf), mp);
1530 				/* Fail to setup rx queue, return */
1531 				if (rte_atomic16_cmpset(&(port->port_status),
1533 							RTE_PORT_STOPPED) == 0)
1534 					printf("Port %d can not be set back "
1535 							"to stopped\n", pi);
1536 				printf("Fail to configure port %d rx queues\n", pi);
1537 				/* try to reconfigure queues next time */
1538 				port->need_reconfig_queues = 1;
		/* Register a handler for every ethdev event type. */
1543 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1544 		     event_type < RTE_ETH_EVENT_MAX;
1546 			diag = rte_eth_dev_callback_register(pi,
1551 				printf("Failed to setup even callback for event %d\n",
		/* start port */
1558 		if (rte_eth_dev_start(pi) < 0) {
1559 			printf("Fail to start port %d\n", pi);
1561 			/* Fail to setup rx queue, return */
1562 			if (rte_atomic16_cmpset(&(port->port_status),
1563 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1564 				printf("Port %d can not be set back to "
1569 		if (rte_atomic16_cmpset(&(port->port_status),
1570 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1571 			printf("Port %d can not be set into started\n", pi);
1573 		rte_eth_macaddr_get(pi, &mac_addr);
1574 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1575 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1576 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1577 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1579 		/* at least one port started, need checking link status */
1580 		need_check_link_status = 1;
1583 	if (need_check_link_status == 1 && !no_link_check)
1584 		check_all_ports_link_status(RTE_PORT_ALL);
1585 	else if (need_check_link_status == 0)
1586 		printf("Please stop the ports first\n");
/*
 * Stop one port, or all ports when pid == RTE_PORT_ALL.
 * A port is skipped if it is still part of the forwarding configuration
 * while a test is running, or if it is a bonding slave. The state machine
 * mirrors start_port(): STARTED -> HANDLING -> rte_eth_dev_stop() ->
 * STOPPED. Link status of all ports is re-checked afterwards unless
 * --no-link-check was given.
 */
1593 stop_port(portid_t pid)
1596 	struct rte_port *port;
1597 	int need_check_link_status = 0;
1604 	if (port_id_is_invalid(pid, ENABLED_WARN))
1607 	printf("Stopping ports...\n");
1609 	RTE_ETH_FOREACH_DEV(pi) {
1610 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1613 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1614 			printf("Please remove port %d from forwarding configuration.\n", pi);
1618 		if (port_is_bonding_slave(pi)) {
1619 			printf("Please remove port %d from bonded device.\n", pi);
		/* Only a STARTED port can be stopped; otherwise skip. */
1624 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1625 						RTE_PORT_HANDLING) == 0)
1628 		rte_eth_dev_stop(pi);
1630 		if (rte_atomic16_cmpset(&(port->port_status),
1631 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1632 			printf("Port %d can not be set into stopped\n", pi);
1633 		need_check_link_status = 1;
1635 	if (need_check_link_status && !no_link_check)
1636 		check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Close one port, or all ports when pid == RTE_PORT_ALL.
 * Refuses ports still used for forwarding or enslaved to a bonded
 * device. A port already CLOSED is reported and skipped; otherwise it
 * must be STOPPED. Any flow rules are flushed before
 * rte_eth_dev_close(), then the state becomes RTE_PORT_CLOSED.
 */
1642 close_port(portid_t pid)
1645 	struct rte_port *port;
1647 	if (port_id_is_invalid(pid, ENABLED_WARN))
1650 	printf("Closing ports...\n");
1652 	RTE_ETH_FOREACH_DEV(pi) {
1653 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1656 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1657 			printf("Please remove port %d from forwarding configuration.\n", pi);
1661 		if (port_is_bonding_slave(pi)) {
1662 			printf("Please remove port %d from bonded device.\n", pi);
		/* CMPSET with equal old/new values is a pure state probe. */
1667 		if (rte_atomic16_cmpset(&(port->port_status),
1668 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1669 			printf("Port %d is already closed\n", pi);
1673 		if (rte_atomic16_cmpset(&(port->port_status),
1674 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1675 			printf("Port %d is now not stopped\n", pi);
1679 		if (port->flow_list)
1680 			port_flow_flush(pi);
1681 		rte_eth_dev_close(pi);
1683 		if (rte_atomic16_cmpset(&(port->port_status),
1684 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1685 			printf("Port %d cannot be set to closed\n", pi);
/*
 * Reset one port, or all ports when pid == RTE_PORT_ALL, via
 * rte_eth_dev_reset(). Refuses ports still forwarding or enslaved to a
 * bonded device. On success the port is marked for full reconfiguration
 * (need_reconfig and need_reconfig_queues) at the next start_port().
 */
1692 reset_port(portid_t pid)
1696 	struct rte_port *port;
1698 	if (port_id_is_invalid(pid, ENABLED_WARN))
1701 	printf("Resetting ports...\n");
1703 	RTE_ETH_FOREACH_DEV(pi) {
1704 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1707 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1708 			printf("Please remove port %d from forwarding "
1709 			       "configuration.\n", pi);
1713 		if (port_is_bonding_slave(pi)) {
1714 			printf("Please remove port %d from bonded device.\n",
1719 		diag = rte_eth_dev_reset(pi);
1722 			port->need_reconfig = 1;
1723 			port->need_reconfig_queues = 1;
1725 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
/*
 * Hot-plug a new port described by 'identifier' (device argument string)
 * using rte_eth_dev_attach(). On success the port's socket id is probed
 * (falling back to 0 if invalid), the port is reconfigured, promiscuous
 * mode is enabled, the global port count is refreshed, and the port is
 * left in RTE_PORT_STOPPED state.
 */
1733 attach_port(char *identifier)
1736 	unsigned int socket_id;
1738 	printf("Attaching a new port...\n");
1740 	if (identifier == NULL) {
1741 		printf("Invalid parameters are specified\n");
1745 	if (rte_eth_dev_attach(identifier, &pi))
1748 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1749 	/* if socket_id is invalid, set to 0 */
1750 	if (check_socket_id(socket_id) < 0)
1752 	reconfig(pi, socket_id);
1753 	rte_eth_promiscuous_enable(pi);
1755 	nb_ports = rte_eth_dev_count();
1757 	ports[pi].port_status = RTE_PORT_STOPPED;
1759 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Hot-unplug a port with rte_eth_dev_detach(). The port must be closed
 * first (port_is_closed()); any remaining flow rules are flushed before
 * detaching. Refreshes the global port count on success.
 */
1764 detach_port(portid_t port_id)
1766 	char name[RTE_ETH_NAME_MAX_LEN];
1768 	printf("Detaching a port...\n");
1770 	if (!port_is_closed(port_id)) {
1771 		printf("Please close port first\n");
1775 	if (ports[port_id].flow_list)
1776 		port_flow_flush(port_id);
1778 	if (rte_eth_dev_detach(port_id, name)) {
1779 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1783 	nb_ports = rte_eth_dev_count();
1785 	printf("Port '%s' is detached. Now total ports is %d\n",
/*
 * NOTE(review): function header is elided in this listing — presumably
 * the testpmd exit path (pmd_test_exit); confirm against the full file.
 * Stops forwarding, then shuts down every remaining port before exiting.
 */
1797 	stop_packet_forwarding();
1799 	if (ports != NULL) {
1801 		RTE_ETH_FOREACH_DEV(pt_id) {
1802 			printf("\nShutting down port %d...\n", pt_id);
1808 	printf("\nBye...\n");
/* Entry of the non-interactive test-command menu: a command name bound
 * to a parameterless handler. PMD_TEST_CMD_NB counts the entries of the
 * pmd_test_menu table (defined elsewhere in this file).
 */
1811 typedef void (*cmd_func_t)(void);
1812 struct pmd_test_command {
1813 	const char *cmd_name;
1814 	cmd_func_t cmd_func;
1817 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1819 /* Check the link status of all ports in up to 9s, and print them finally */
1821 check_all_ports_link_status(uint32_t port_mask)
1823 #define CHECK_INTERVAL 100 /* 100ms */
1824 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1826 	uint8_t count, all_ports_up, print_flag = 0;
1827 	struct rte_eth_link link;
1829 	printf("Checking link statuses...\n");
	/* Poll every CHECK_INTERVAL ms; once all links are up (or the
	 * timeout is about to expire) set print_flag and do one final
	 * pass that prints each port's status.
	 */
1831 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1833 		RTE_ETH_FOREACH_DEV(portid) {
1834 			if ((port_mask & (1 << portid)) == 0)
1836 			memset(&link, 0, sizeof(link));
1837 			rte_eth_link_get_nowait(portid, &link);
1838 			/* print link status if flag set */
1839 			if (print_flag == 1) {
1840 				if (link.link_status)
1842 					"Port%d Link Up. speed %u Mbps- %s\n",
1843 					portid, link.link_speed,
1844 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1845 					("full-duplex") : ("half-duplex\n"));
1847 					printf("Port %d Link Down\n", portid);
1850 			/* clear all_ports_up flag if any link down */
1851 			if (link.link_status == ETH_LINK_DOWN) {
1856 		/* after finally printing all link status, get out */
1857 		if (print_flag == 1)
1860 		if (all_ports_up == 0) {
1862 			rte_delay_ms(CHECK_INTERVAL);
1865 		/* set the print_flag if all ports up or timeout */
1866 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred handler for RTE_ETH_EVENT_INTR_RMV (scheduled from
 * eth_event_callback via rte_eal_alarm_set). 'arg' carries the port id
 * as an intptr_t. Closes the port, then detaches the underlying device.
 */
1876 rmv_event_callback(void *arg)
1878 	struct rte_eth_dev *dev;
1879 	portid_t port_id = (intptr_t)arg;
1881 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1882 	dev = &rte_eth_devices[port_id];
1885 	close_port(port_id);
1886 	printf("removing device %s\n", dev->device->name);
1887 	if (rte_eal_dev_detach(dev->device))
1888 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
1892 /* This function is used by the interrupt thread */
/*
 * Generic ethdev event callback registered for every event type in
 * start_port(). Prints the event when its bit is set in
 * event_print_mask; a device-removal event additionally schedules
 * rmv_event_callback() 100ms later via rte_eal_alarm_set(), since the
 * port cannot be closed from interrupt context.
 */
1894 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1897 	static const char * const event_desc[] = {
1898 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1899 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1900 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1901 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1902 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1903 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1904 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1905 		[RTE_ETH_EVENT_MAX] = NULL,
1908 	RTE_SET_USED(param);
1909 	RTE_SET_USED(ret_param);
1911 	if (type >= RTE_ETH_EVENT_MAX) {
1912 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1913 			port_id, __func__, type);
1915 	} else if (event_print_mask & (UINT32_C(1) << type)) {
1916 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
1922 	case RTE_ETH_EVENT_INTR_RMV:
1923 		if (rte_eal_alarm_set(100000,
1924 				rmv_event_callback, (void *)(intptr_t)port_id))
1925 			fprintf(stderr, "Could not set up deferred device removal\n");
/*
 * Apply the user-supplied TX queue -> stats-counter mappings
 * (tx_queue_stats_mappings table) that target this port, skipping
 * entries whose queue id is out of the configured nb_txq range.
 * Marks the port as having TX mapping enabled when a mapping was found.
 */
1934 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1938 	uint8_t mapping_found = 0;
1940 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1941 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1942 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1943 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1944 					tx_queue_stats_mappings[i].queue_id,
1945 					tx_queue_stats_mappings[i].stats_counter_id);
1952 		port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX counterpart of set_tx_queue_stats_mapping_registers(): applies
 * rx_queue_stats_mappings entries for this port (queue id must be
 * within nb_rxq) and flags the port when a mapping was installed.
 */
1957 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1961 	uint8_t mapping_found = 0;
1963 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1964 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1965 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1966 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1967 					rx_queue_stats_mappings[i].queue_id,
1968 					rx_queue_stats_mappings[i].stats_counter_id);
1975 		port->rx_queue_stats_mapping_enabled = 1;
/*
 * Install both TX and RX queue-stats mappings for a port.
 * -ENOTSUP from the PMD is tolerated (feature simply disabled and
 * reported); any other error is fatal and aborts testpmd via rte_exit.
 */
1980 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
1984 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1986 		if (diag == -ENOTSUP) {
1987 			port->tx_queue_stats_mapping_enabled = 0;
1988 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1991 			rte_exit(EXIT_FAILURE,
1992 					"set_tx_queue_stats_mapping_registers "
1993 					"failed for port id=%d diag=%d\n",
1997 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1999 		if (diag == -ENOTSUP) {
2000 			port->rx_queue_stats_mapping_enabled = 0;
2001 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2004 			rte_exit(EXIT_FAILURE,
2005 					"set_rx_queue_stats_mapping_registers "
2006 					"failed for port id=%d diag=%d\n",
/*
 * Build the port's RX/TX queue configuration: start from the PMD's
 * defaults (dev_info.default_rxconf/default_txconf), then override each
 * threshold/flag for which the user passed a command-line value
 * (RTE_PMD_PARAM_UNSET marks "not given").
 */
2012 rxtx_port_config(struct rte_port *port)
2014 	port->rx_conf = port->dev_info.default_rxconf;
2015 	port->tx_conf = port->dev_info.default_txconf;
2017 	/* Check if any RX/TX parameters have been passed */
2018 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2019 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2021 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2022 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2024 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2025 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2027 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2028 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2030 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2031 		port->rx_conf.rx_drop_en = rx_drop_en;
2033 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2034 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2036 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2037 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2039 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2040 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2042 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2043 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2045 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2046 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2048 	if (txq_flags != RTE_PMD_PARAM_UNSET)
2049 		port->tx_conf.txq_flags = txq_flags;
/*
 * Initialize per-port configuration for every probed port: RX mode and
 * flow-director defaults, RSS key/hash selection, multi-queue RX mode
 * (RSS vs NONE for non-DCB ports), queue thresholds via
 * rxtx_port_config(), MAC address, queue-stats mappings, optional
 * ixgbe bypass init, LSC/RMV interrupt enablement when both requested
 * and supported by the device, and softnic detection.
 */
2053 init_port_config(void)
2056 	struct rte_port *port;
2058 	RTE_ETH_FOREACH_DEV(pid) {
2060 		port->dev_conf.rxmode = rx_mode;
2061 		port->dev_conf.fdir_conf = fdir_conf;
2063 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2064 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2066 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2067 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		/* RSS multi-queue RX only when not in DCB mode and a hash
		 * function is configured.
		 */
2070 		if (port->dcb_flag == 0) {
2071 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2072 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2074 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2077 		rxtx_port_config(port);
2079 		rte_eth_macaddr_get(pid, &port->eth_addr);
2081 		map_port_queue_stats_mapping_registers(pid, port);
2082 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2083 		rte_pmd_ixgbe_bypass_init(pid);
		/* Enable link-state / removal interrupts only when the
		 * device advertises support via its dev_flags.
		 */
2086 		if (lsc_interrupt &&
2087 		    (rte_eth_devices[pid].data->dev_flags &
2088 		     RTE_ETH_DEV_INTR_LSC))
2089 			port->dev_conf.intr_conf.lsc = 1;
2090 		if (rmv_interrupt &&
2091 		    (rte_eth_devices[pid].data->dev_flags &
2092 		     RTE_ETH_DEV_INTR_RMV))
2093 			port->dev_conf.intr_conf.rmv = 1;
2095 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2096 		/* Detect softnic port */
2097 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2098 			port->softnic_enable = 1;
2099 			memset(&port->softport, 0, sizeof(struct softnic_port));
2101 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2102 				port->softport.tm_flag = 1;
/* Mark a port as a bonding slave so start/stop/close skip it. */
2108 void set_port_slave_flag(portid_t slave_pid)
2110 	struct rte_port *port;
2112 	port = &ports[slave_pid];
2113 	port->slave_flag = 1;
/* Clear the bonding-slave mark set by set_port_slave_flag(). */
2116 void clear_port_slave_flag(portid_t slave_pid)
2118 	struct rte_port *port;
2120 	port = &ports[slave_pid];
2121 	port->slave_flag = 0;
/* Return non-zero when the port is flagged as a bonding slave. */
2124 uint8_t port_is_bonding_slave(portid_t slave_pid)
2126 	struct rte_port *port;
2128 	port = &ports[slave_pid];
2129 	return port->slave_flag;
/* VLAN ids 0..31 used to populate the VMDQ+DCB pool map and the DCB
 * VLAN filter table in init_port_dcb_config().
 */
2132 const uint16_t vlan_tags[] = {
2133 	0,  1,  2,  3,  4,  5,  6,  7,
2134 	8,  9, 10, 11,  12, 13, 14, 15,
2135 	16, 17, 18, 19, 20, 21, 22, 23,
2136 	24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill 'eth_conf' with a DCB configuration.
 *
 * DCB_VT_ENABLED: VMDQ+DCB — the number of queue pools is derived from
 * num_tcs (4 TCs -> 32 pools, else 16), each vlan_tags[] entry is mapped
 * round-robin onto a pool, and user priorities are spread over the TCs.
 * Otherwise: plain DCB with RSS on RX (ETH_MQ_RX_DCB_RSS) and DCB on TX.
 * pfc_en selects priority flow control in dcb_capability_en.
 */
2140 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2141 		 enum dcb_mode_enable dcb_mode,
2142 		 enum rte_eth_nb_tcs num_tcs,
2148 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2149 	 * given above, and the number of traffic classes available for use.
2151 	if (dcb_mode == DCB_VT_ENABLED) {
2152 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2153 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2154 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2155 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2157 		/* VMDQ+DCB RX and TX configurations */
2158 		vmdq_rx_conf->enable_default_pool = 0;
2159 		vmdq_rx_conf->default_pool = 0;
2160 		vmdq_rx_conf->nb_queue_pools =
2161 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2162 		vmdq_tx_conf->nb_queue_pools =
2163 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2165 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2166 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2167 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2168 			vmdq_rx_conf->pool_map[i].pools =
2169 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2171 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2172 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2173 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2176 		/* set DCB mode of RX and TX of multiple queues */
2177 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2178 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2180 		struct rte_eth_dcb_rx_conf *rx_conf =
2181 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2182 		struct rte_eth_dcb_tx_conf *tx_conf =
2183 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2185 		rx_conf->nb_tcs = num_tcs;
2186 		tx_conf->nb_tcs = num_tcs;
2188 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2189 			rx_conf->dcb_tc[i] = i % num_tcs;
2190 			tx_conf->dcb_tc[i] = i % num_tcs;
2192 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2193 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2194 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2198 		eth_conf->dcb_capability_en =
2199 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2201 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * Switch port 'pid' into DCB mode: build the DCB rte_eth_conf via
 * get_eth_dcb_conf(), configure the device with 0 RX/TX queues (queues
 * are set up later at start), derive nb_rxq/nb_txq from the device's
 * capabilities (VT mode uses VF/max queue counts; non-VT uses all PF
 * queues, or num_tcs queues when vmdq_pool_base != 0), enable HW VLAN
 * filtering, program the vlan_tags[] filter entries, and flag the port
 * with dcb_flag so init_port_config() keeps its mq_mode.
 */
2207 init_port_dcb_config(portid_t pid,
2208 		     enum dcb_mode_enable dcb_mode,
2209 		     enum rte_eth_nb_tcs num_tcs,
2212 	struct rte_eth_conf port_conf;
2213 	struct rte_port *rte_port;
2217 	rte_port = &ports[pid];
2219 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2220 	/* Enter DCB configuration status */
2223 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
2224 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2227 	port_conf.rxmode.hw_vlan_filter = 1;
2230 	 * Write the configuration into the device.
2231 	 * Set the numbers of RX & TX queues to 0, so
2232 	 * the RX & TX queues will not be setup.
2234 	rte_eth_dev_configure(pid, 0, 0, &port_conf);
2236 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2238 	/* If dev_info.vmdq_pool_base is greater than 0,
2239 	 * the queue id of vmdq pools is started after pf queues.
2241 	if (dcb_mode == DCB_VT_ENABLED &&
2242 	    rte_port->dev_info.vmdq_pool_base > 0) {
2243 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2244 			" for port %d.", pid);
2248 	/* Assume the ports in testpmd have the same dcb capability
2249 	 * and has the same number of rxq and txq in dcb mode
2251 	if (dcb_mode == DCB_VT_ENABLED) {
2252 		if (rte_port->dev_info.max_vfs > 0) {
2253 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2254 			nb_txq = rte_port->dev_info.nb_tx_queues;
2256 			nb_rxq = rte_port->dev_info.max_rx_queues;
2257 			nb_txq = rte_port->dev_info.max_tx_queues;
2260 		/*if vt is disabled, use all pf queues */
2261 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2262 			nb_rxq = rte_port->dev_info.max_rx_queues;
2263 			nb_txq = rte_port->dev_info.max_tx_queues;
2265 			nb_rxq = (queueid_t)num_tcs;
2266 			nb_txq = (queueid_t)num_tcs;
2270 	rx_free_thresh = 64;
2272 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2274 	rxtx_port_config(rte_port);
2276 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	/* Accept all vlan_tags[] ids through the HW VLAN filter. */
2277 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2278 		rx_vft_set(pid, vlan_tags[i], 1);
2280 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2281 	map_port_queue_stats_mapping_registers(pid, rte_port);
2283 	rte_port->dcb_flag = 1;
/*
 * NOTE(review): function header elided in this listing — presumably
 * init_port(); confirm against the full file. Allocates the zeroed,
 * cache-line aligned global 'ports' array (RTE_MAX_ETHPORTS entries)
 * and aborts on allocation failure.
 */
2291 	/* Configuration of Ethernet ports. */
2292 	ports = rte_zmalloc("testpmd: ports",
2293 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2294 			    RTE_CACHE_LINE_SIZE);
2295 	if (ports == NULL) {
2296 		rte_exit(EXIT_FAILURE,
2297 				"rte_zmalloc(%d struct rte_port) failed\n",
/*
 * NOTE(review): function header elided in this listing — presumably the
 * periodic stats display (print_stats); confirm against the full file.
 * Uses ANSI escape sequences (ESC[2J, ESC[1;1H) to clear the terminal
 * and home the cursor, then prints NIC stats for each forwarding port.
 */
2313 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2314 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2316 	/* Clear screen and move to top left */
2317 	printf("%s%s", clr, top_left);
2319 	printf("\nPort statistics ====================================");
2320 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2321 		nic_stats_display(fwd_ports_ids[i]);
/*
 * SIGINT/SIGTERM handler: uninitialize the optional pdump and
 * latency-stats subsystems, set the force-quit flag, then re-raise the
 * signal with the default disposition so the process exits with the
 * conventional signal status.
 */
2325 signal_handler(int signum)
2327 	if (signum == SIGINT || signum == SIGTERM) {
2328 		printf("\nSignal %d received, preparing to exit...\n",
2330 #ifdef RTE_LIBRTE_PDUMP
2331 		/* uninitialize packet capture framework */
2334 #ifdef RTE_LIBRTE_LATENCY_STATS
2335 		rte_latencystats_uninit();
2338 		/* Set flag to indicate the force termination. */
2340 		/* exit with the expected status */
2341 		signal(signum, SIG_DFL);
2342 		kill(getpid(), signum);
2347 main(int argc, char** argv)
2352 signal(SIGINT, signal_handler);
2353 signal(SIGTERM, signal_handler);
2355 diag = rte_eal_init(argc, argv);
2357 rte_panic("Cannot init EAL\n");
2359 testpmd_logtype = rte_log_register("testpmd");
2360 if (testpmd_logtype < 0)
2361 rte_panic("Cannot register log type");
2362 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2364 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2365 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2369 #ifdef RTE_LIBRTE_PDUMP
2370 /* initialize packet capture framework */
2371 rte_pdump_init(NULL);
2374 nb_ports = (portid_t) rte_eth_dev_count();
2376 TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2378 /* allocate port structures, and init them */
2381 set_def_fwd_config();
2383 rte_panic("Empty set of forwarding logical cores - check the "
2384 "core mask supplied in the command parameters\n");
2386 /* Bitrate/latency stats disabled by default */
2387 #ifdef RTE_LIBRTE_BITRATE
2388 bitrate_enabled = 0;
2390 #ifdef RTE_LIBRTE_LATENCY_STATS
2391 latencystats_enabled = 0;
2397 launch_args_parse(argc, argv);
2399 if (tx_first && interactive)
2400 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2401 "interactive mode.\n");
2403 if (tx_first && lsc_interrupt) {
2404 printf("Warning: lsc_interrupt needs to be off when "
2405 " using tx_first. Disabling.\n");
2409 if (!nb_rxq && !nb_txq)
2410 printf("Warning: Either rx or tx queues should be non-zero\n");
2412 if (nb_rxq > 1 && nb_rxq > nb_txq)
2413 printf("Warning: nb_rxq=%d enables RSS configuration, "
2414 "but nb_txq=%d will prevent to fully test it.\n",
2418 if (start_port(RTE_PORT_ALL) != 0)
2419 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2421 /* set all ports to promiscuous mode by default */
2422 RTE_ETH_FOREACH_DEV(port_id)
2423 rte_eth_promiscuous_enable(port_id);
2425 /* Init metrics library */
2426 rte_metrics_init(rte_socket_id());
2428 #ifdef RTE_LIBRTE_LATENCY_STATS
2429 if (latencystats_enabled != 0) {
2430 int ret = rte_latencystats_init(1, NULL);
2432 printf("Warning: latencystats init()"
2433 " returned error %d\n", ret);
2434 printf("Latencystats running on lcore %d\n",
2435 latencystats_lcore_id);
2439 /* Setup bitrate stats */
2440 #ifdef RTE_LIBRTE_BITRATE
2441 if (bitrate_enabled != 0) {
2442 bitrate_data = rte_stats_bitrate_create();
2443 if (bitrate_data == NULL)
2444 rte_exit(EXIT_FAILURE,
2445 "Could not allocate bitrate data.\n");
2446 rte_stats_bitrate_reg(bitrate_data);
2450 #ifdef RTE_LIBRTE_CMDLINE
2451 if (strlen(cmdline_filename) != 0)
2452 cmdline_read_from_file(cmdline_filename);
2454 if (interactive == 1) {
2456 printf("Start automatic packet forwarding\n");
2457 start_packet_forwarding(0);
2469 printf("No commandline core given, start packet forwarding\n");
2470 start_packet_forwarding(tx_first);
2471 if (stats_period != 0) {
2472 uint64_t prev_time = 0, cur_time, diff_time = 0;
2473 uint64_t timer_period;
2475 /* Convert to number of cycles */
2476 timer_period = stats_period * rte_get_timer_hz();
2478 while (f_quit == 0) {
2479 cur_time = rte_get_timer_cycles();
2480 diff_time += cur_time - prev_time;
2482 if (diff_time >= timer_period) {
2484 /* Reset the timer */
2487 /* Sleep to avoid unnecessary checks */
2488 prev_time = cur_time;
2493 printf("Press enter to exit\n");
2494 rc = read(0, &c, 1);