1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
13 #include <sys/types.h>
16 #include <sys/queue.h>
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
41 #include <rte_mbuf_pool_ops.h>
42 #include <rte_interrupts.h>
44 #include <rte_ether.h>
45 #include <rte_ethdev.h>
47 #include <rte_string_fns.h>
48 #ifdef RTE_LIBRTE_IXGBE_PMD
49 #include <rte_pmd_ixgbe.h>
51 #ifdef RTE_LIBRTE_PDUMP
52 #include <rte_pdump.h>
55 #include <rte_metrics.h>
56 #ifdef RTE_LIBRTE_BITRATE
57 #include <rte_bitrate.h>
59 #ifdef RTE_LIBRTE_LATENCY_STATS
60 #include <rte_latencystats.h>
65 uint16_t verbose_level = 0; /**< Silent by default. */
66 int testpmd_logtype; /**< Log type for testpmd logs */
68 /* Use the master core for the command line? */
69 uint8_t interactive = 0;
70 uint8_t auto_start = 0;
72 char cmdline_filename[PATH_MAX] = {0};
75 * NUMA support configuration.
76 * When set, the NUMA support attempts to dispatch the allocation of the
77 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
78 * probed ports among the CPU sockets 0 and 1.
79 * Otherwise, all memory is allocated from CPU socket 0.
81 uint8_t numa_support = 1; /**< numa enabled by default */
84 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
87 uint8_t socket_num = UMA_NO_CONFIG;
90 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
95 * Store the specified sockets on which the memory pool used by each port
98 uint8_t port_numa[RTE_MAX_ETHPORTS];
101 * Store the specified sockets on which the RX ring used by each port
104 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
107 * Store the specified sockets on which the TX ring used by each port
110 uint8_t txring_numa[RTE_MAX_ETHPORTS];
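/*
 * Illustrative sketch (hypothetical values, not part of testpmd): with a
 * command line such as
 *
 *	--port-numa-config="(0,0),(1,1)" --ring-numa-config="(0,3,0),(1,3,1)"
 *
 * the argument parser is expected to fill these arrays roughly as:
 *
 *	port_numa[0] = 0;	port_numa[1] = 1;
 *	rxring_numa[0] = 0;	txring_numa[0] = 0;
 *	rxring_numa[1] = 1;	txring_numa[1] = 1;
 *
 * Ports not mentioned keep NUMA_NO_CONFIG. The exact option syntax is an
 * assumption here; see the testpmd user guide for the authoritative form.
 */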
113 * Record the Ethernet addresses of the peer target ports to which packets are
115 * Must be instantiated with the Ethernet addresses of peer traffic generator
118 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
119 portid_t nb_peer_eth_addrs = 0;
122 * Probed Target Environment.
124 struct rte_port *ports; /**< For all probed ethernet ports. */
125 portid_t nb_ports; /**< Number of probed ethernet ports. */
126 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
127 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
130 * Test Forwarding Configuration.
131 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
132 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
134 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
135 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
136 portid_t nb_cfg_ports; /**< Number of configured ports. */
137 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
139 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
140 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
142 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
143 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
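/*
 * Example: with nb_ports = 2, nb_rxq = 4 and nb_txq = 2, init_fwd_streams()
 * allocates 2 * RTE_MAX(4, 2) = 8 fwd_stream structures, one per
 * (port, queue) pair.
 */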
146 * Forwarding engines.
148 struct fwd_engine * fwd_engines[] = {
157 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
159 &softnic_tm_bypass_engine,
161 #ifdef RTE_LIBRTE_IEEE1588
162 &ieee1588_fwd_engine,
167 struct fwd_config cur_fwd_config;
168 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
169 uint32_t retry_enabled;
170 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
171 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
173 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
174 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
175 * specified on command-line. */
176 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
179 * In a container, the process running with the 'stats-period' option cannot
180 * be terminated normally. Set a flag to exit the stats-period loop after SIGINT/SIGTERM is received.
185 * Configuration of packet segments used by the "txonly" processing engine.
187 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
188 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
189 TXONLY_DEF_PACKET_LEN,
191 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
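/*
 * Example (sketch): sending 64-byte TXONLY packets split into two 32-byte
 * segments corresponds to the following state, which the "set txpkts"
 * command is expected to produce:
 *
 *	tx_pkt_seg_lengths[0] = 32;
 *	tx_pkt_seg_lengths[1] = 32;
 *	tx_pkt_nb_segs = 2;
 *	tx_pkt_length = 64;
 */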
193 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
194 /**< Split policy for packets to TX. */
196 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
197 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
199 /* Whether the current configuration is in DCB mode; 0 means it is not. */
200 uint8_t dcb_config = 0;
202 /* Whether DCB is in testing state */
203 uint8_t dcb_test = 0;
206 * Configurable number of RX/TX queues.
208 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
209 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
212 * Configurable number of RX/TX ring descriptors.
214 #define RTE_TEST_RX_DESC_DEFAULT 1024
215 #define RTE_TEST_TX_DESC_DEFAULT 1024
216 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
217 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
219 #define RTE_PMD_PARAM_UNSET -1
221 * Configurable values of RX and TX ring threshold registers.
224 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
228 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
233 * Configurable value of RX free threshold.
235 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
238 * Configurable value of RX drop enable.
240 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
243 * Configurable value of TX free threshold.
245 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
248 * Configurable value of TX RS bit threshold.
250 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
253 * Receive Side Scaling (RSS) configuration.
255 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258 * Port topology configuration
260 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263 * Avoid flushing all the RX streams before starting forwarding.
265 uint8_t no_flush_rx = 0; /* flush by default */
268 * Flow API isolated mode.
270 uint8_t flow_isolate_all;
273 * Avoid checking the link status when starting/stopping a port.
275 uint8_t no_link_check = 0; /* check by default */
278 * Enable link status change notification
280 uint8_t lsc_interrupt = 1; /* enabled by default */
283 * Enable device removal notification.
285 uint8_t rmv_interrupt = 1; /* enabled by default */
288 * Display or mask Ethernet events.
289 * Defaults to all events except VF_MBOX.
291 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
292 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
293 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
294 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
295 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
296 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
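/*
 * Example: VF_MBOX events are masked by default. To print them as well,
 * the mask could be extended at init time with:
 *
 *	event_print_mask |= UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX;
 *
 * eth_event_callback() tests this mask before printing an event.
 */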
299 * NIC bypass mode configuration options.
302 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
303 /* The NIC bypass watchdog timeout. */
304 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
308 #ifdef RTE_LIBRTE_LATENCY_STATS
311 * Set when latency stats are enabled on the command line
313 uint8_t latencystats_enabled;
316 * Lcore ID to serve latency statistics.
318 lcoreid_t latencystats_lcore_id = -1;
323 * Ethernet device configuration.
325 struct rte_eth_rxmode rx_mode = {
326 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
327 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
328 .ignore_offload_bitfield = 1,
331 struct rte_eth_txmode tx_mode = {
332 .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
335 struct rte_fdir_conf fdir_conf = {
336 .mode = RTE_FDIR_MODE_NONE,
337 .pballoc = RTE_FDIR_PBALLOC_64K,
338 .status = RTE_FDIR_REPORT_STATUS,
340 .vlan_tci_mask = 0x0,
342 .src_ip = 0xFFFFFFFF,
343 .dst_ip = 0xFFFFFFFF,
346 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
347 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
349 .src_port_mask = 0xFFFF,
350 .dst_port_mask = 0xFFFF,
351 .mac_addr_byte_mask = 0xFF,
352 .tunnel_type_mask = 1,
353 .tunnel_id_mask = 0xFFFFFFFF,
358 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
360 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
361 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
363 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
364 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
366 uint16_t nb_tx_queue_stats_mappings = 0;
367 uint16_t nb_rx_queue_stats_mappings = 0;
370 * Display zero values for xstats by default; set xstats_hide_zero to hide them.
372 uint8_t xstats_hide_zero;
374 unsigned int num_sockets = 0;
375 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
377 #ifdef RTE_LIBRTE_BITRATE
378 /* Bitrate statistics */
379 struct rte_stats_bitrates *bitrate_data;
380 lcoreid_t bitrate_lcore_id;
381 uint8_t bitrate_enabled;
384 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
385 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
387 /* Forward function declarations */
388 static void map_port_queue_stats_mapping_registers(portid_t pi,
389 struct rte_port *port);
390 static void check_all_ports_link_status(uint32_t port_mask);
391 static int eth_event_callback(portid_t port_id,
392 enum rte_eth_event_type type,
393 void *param, void *ret_param);
396 * Check if all the ports are started.
397 * If yes, return positive value. If not, return zero.
399 static int all_ports_started(void);
401 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
402 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
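/*
 * With ETHER_MAX_LEN = 1518 and ETHER_CRC_LEN = 4, gso_max_segment_size
 * defaults to 1514 bytes, i.e. a full non-jumbo Ethernet frame without
 * its CRC.
 */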
405 * Helper function to check whether a socket has already been discovered.
406 * Return zero if it has; return a positive value if the socket is new.
409 new_socket_id(unsigned int socket_id)
413 for (i = 0; i < num_sockets; i++) {
414 if (socket_ids[i] == socket_id)
421 * Set up the default configuration.
424 set_default_fwd_lcores_config(void)
428 unsigned int sock_num;
431 for (i = 0; i < RTE_MAX_LCORE; i++) {
432 sock_num = rte_lcore_to_socket_id(i);
433 if (new_socket_id(sock_num)) {
434 if (num_sockets >= RTE_MAX_NUMA_NODES) {
435 rte_exit(EXIT_FAILURE,
436 "Total sockets greater than %u\n",
439 socket_ids[num_sockets++] = sock_num;
441 if (!rte_lcore_is_enabled(i))
443 if (i == rte_get_master_lcore())
445 fwd_lcores_cpuids[nb_lc++] = i;
447 nb_lcores = (lcoreid_t) nb_lc;
448 nb_cfg_lcores = nb_lcores;
453 set_def_peer_eth_addrs(void)
457 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
458 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
459 peer_eth_addrs[i].addr_bytes[5] = i;
464 set_default_fwd_ports_config(void)
469 RTE_ETH_FOREACH_DEV(pt_id)
470 fwd_ports_ids[i++] = pt_id;
472 nb_cfg_ports = nb_ports;
473 nb_fwd_ports = nb_ports;
477 set_def_fwd_config(void)
479 set_default_fwd_lcores_config();
480 set_def_peer_eth_addrs();
481 set_default_fwd_ports_config();
485 * Configuration initialisation done once at init time.
488 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
489 unsigned int socket_id)
491 char pool_name[RTE_MEMPOOL_NAMESIZE];
492 struct rte_mempool *rte_mp = NULL;
495 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
496 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
499 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
500 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
503 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
504 mb_size, (unsigned) mb_mempool_cache,
505 sizeof(struct rte_pktmbuf_pool_private),
510 if (rte_mempool_populate_anon(rte_mp) == 0) {
511 rte_mempool_free(rte_mp);
515 rte_pktmbuf_pool_init(rte_mp, NULL);
516 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
518 /* wrapper to rte_mempool_create() */
519 TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
520 rte_mbuf_best_mempool_ops());
521 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
522 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
526 if (rte_mp == NULL) {
527 rte_exit(EXIT_FAILURE,
528 "Creation of mbuf pool for socket %u failed: %s\n",
529 socket_id, rte_strerror(rte_errno));
530 } else if (verbose_level > 0) {
531 rte_mempool_dump(stdout, rte_mp);
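/*
 * Usage sketch (hypothetical sizes): create a pool of 8192 mbufs with the
 * default data room size on socket 0:
 *
 *	mbuf_pool_create(DEFAULT_MBUF_DATA_SIZE, 8192, 0);
 *
 * The pool is later retrieved per socket with mbuf_pool_find(0).
 */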
536 * Check whether the given socket ID is valid in NUMA mode;
537 * if valid, return 0, else return -1.
540 check_socket_id(const unsigned int socket_id)
542 static int warning_once = 0;
544 if (new_socket_id(socket_id)) {
545 if (!warning_once && numa_support)
546 printf("Warning: NUMA should be configured manually by"
547 " using --port-numa-config and"
548 " --ring-numa-config parameters along with"
557 * Get the allowed maximum number of RX queues.
558 * *pid returns the port ID that has the minimal value of
559 * max_rx_queues among all ports.
562 get_allowed_max_nb_rxq(portid_t *pid)
564 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
566 struct rte_eth_dev_info dev_info;
568 RTE_ETH_FOREACH_DEV(pi) {
569 rte_eth_dev_info_get(pi, &dev_info);
570 if (dev_info.max_rx_queues < allowed_max_rxq) {
571 allowed_max_rxq = dev_info.max_rx_queues;
575 return allowed_max_rxq;
579 * Check whether the input rxq is valid.
580 * The input rxq is valid if it does not exceed the maximum number
581 * of RX queues of any port.
582 * If valid, return 0, else return -1.
585 check_nb_rxq(queueid_t rxq)
587 queueid_t allowed_max_rxq;
590 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
591 if (rxq > allowed_max_rxq) {
592 printf("Fail: input rxq (%u) can't be greater "
593 "than max_rx_queues (%u) of port %u\n",
603 * Get the allowed maximum number of TX queues.
604 * *pid returns the port ID that has the minimal value of
605 * max_tx_queues among all ports.
608 get_allowed_max_nb_txq(portid_t *pid)
610 queueid_t allowed_max_txq = MAX_QUEUE_ID;
612 struct rte_eth_dev_info dev_info;
614 RTE_ETH_FOREACH_DEV(pi) {
615 rte_eth_dev_info_get(pi, &dev_info);
616 if (dev_info.max_tx_queues < allowed_max_txq) {
617 allowed_max_txq = dev_info.max_tx_queues;
621 return allowed_max_txq;
625 * Check whether the input txq is valid.
626 * The input txq is valid if it does not exceed the maximum number
627 * of TX queues of any port.
628 * If valid, return 0, else return -1.
631 check_nb_txq(queueid_t txq)
633 queueid_t allowed_max_txq;
636 allowed_max_txq = get_allowed_max_nb_txq(&pid);
637 if (txq > allowed_max_txq) {
638 printf("Fail: input txq (%u) can't be greater "
639 "than max_tx_queues (%u) of port %u\n",
652 struct rte_port *port;
653 struct rte_mempool *mbp;
654 unsigned int nb_mbuf_per_pool;
656 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
657 struct rte_gro_param gro_param;
660 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
663 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
664 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
665 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
668 /* Configuration of logical cores. */
669 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
670 sizeof(struct fwd_lcore *) * nb_lcores,
671 RTE_CACHE_LINE_SIZE);
672 if (fwd_lcores == NULL) {
673 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
674 "failed\n", nb_lcores);
676 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
677 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
678 sizeof(struct fwd_lcore),
679 RTE_CACHE_LINE_SIZE);
680 if (fwd_lcores[lc_id] == NULL) {
681 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
684 fwd_lcores[lc_id]->cpuid_idx = lc_id;
687 RTE_ETH_FOREACH_DEV(pid) {
689 /* Apply default TxRx configuration for all ports */
690 port->dev_conf.txmode = tx_mode;
691 port->dev_conf.rxmode = rx_mode;
692 rte_eth_dev_info_get(pid, &port->dev_info);
693 if (!(port->dev_info.tx_offload_capa &
694 DEV_TX_OFFLOAD_MBUF_FAST_FREE))
695 port->dev_conf.txmode.offloads &=
696 ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
698 if (port_numa[pid] != NUMA_NO_CONFIG)
699 port_per_socket[port_numa[pid]]++;
701 uint32_t socket_id = rte_eth_dev_socket_id(pid);
703 /* if socket_id is invalid, set to 0 */
704 if (check_socket_id(socket_id) < 0)
706 port_per_socket[socket_id]++;
710 /* set flag to initialize port/queue */
711 port->need_reconfig = 1;
712 port->need_reconfig_queues = 1;
716 * Create mbuf pools.
717 * If NUMA support is disabled, create a single mbuf pool in
718 * socket 0 memory by default.
719 * Otherwise, create an mbuf pool in the memory of each discovered socket.
721 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
722 * nb_txd can be reconfigured at run time.
724 if (param_total_num_mbufs)
725 nb_mbuf_per_pool = param_total_num_mbufs;
727 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
728 (nb_lcores * mb_mempool_cache) +
729 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
730 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
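/*
 * Worked example (assuming RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048
 * and MAX_PKT_BURST = 512; see testpmd.h for the actual values): with
 * 4 lcores and the default cache of 250 mbufs,
 *
 *	nb_mbuf_per_pool = 2048 + (4 * 250) + 2048 + 512 = 5608
 *
 * which is then scaled by RTE_MAX_ETHPORTS.
 */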
736 for (i = 0; i < num_sockets; i++)
737 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
740 if (socket_num == UMA_NO_CONFIG)
741 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
743 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
749 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
750 DEV_TX_OFFLOAD_GRE_TNL_TSO;
752 * Record which mbuf pool each logical core should use, if needed.
754 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
755 mbp = mbuf_pool_find(
756 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
759 mbp = mbuf_pool_find(0);
760 fwd_lcores[lc_id]->mbp = mbp;
761 /* initialize GSO context */
762 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
763 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
764 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
765 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
767 fwd_lcores[lc_id]->gso_ctx.flag = 0;
770 /* Configuration of packet forwarding streams. */
771 if (init_fwd_streams() < 0)
772 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
776 /* create a gro context for each lcore */
777 gro_param.gro_types = RTE_GRO_TCP_IPV4;
778 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
779 gro_param.max_item_per_flow = MAX_PKT_BURST;
780 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
781 gro_param.socket_id = rte_lcore_to_socket_id(
782 fwd_lcores_cpuids[lc_id]);
783 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
784 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
785 rte_exit(EXIT_FAILURE,
786 "rte_gro_ctx_create() failed\n");
793 reconfig(portid_t new_port_id, unsigned socket_id)
795 struct rte_port *port;
797 /* Reconfiguration of Ethernet ports. */
798 port = &ports[new_port_id];
799 rte_eth_dev_info_get(new_port_id, &port->dev_info);
801 /* set flag to initialize port/queue */
802 port->need_reconfig = 1;
803 port->need_reconfig_queues = 1;
804 port->socket_id = socket_id;
811 init_fwd_streams(void)
814 struct rte_port *port;
815 streamid_t sm_id, nb_fwd_streams_new;
818 /* Set the socket ID according to whether NUMA is enabled */
819 RTE_ETH_FOREACH_DEV(pid) {
821 if (nb_rxq > port->dev_info.max_rx_queues) {
822 printf("Fail: nb_rxq(%d) is greater than "
823 "max_rx_queues(%d)\n", nb_rxq,
824 port->dev_info.max_rx_queues);
827 if (nb_txq > port->dev_info.max_tx_queues) {
828 printf("Fail: nb_txq(%d) is greater than "
829 "max_tx_queues(%d)\n", nb_txq,
830 port->dev_info.max_tx_queues);
834 if (port_numa[pid] != NUMA_NO_CONFIG)
835 port->socket_id = port_numa[pid];
837 port->socket_id = rte_eth_dev_socket_id(pid);
839 /* if socket_id is invalid, set to 0 */
840 if (check_socket_id(port->socket_id) < 0)
845 if (socket_num == UMA_NO_CONFIG)
848 port->socket_id = socket_num;
852 q = RTE_MAX(nb_rxq, nb_txq);
854 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
857 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
858 if (nb_fwd_streams_new == nb_fwd_streams)
861 if (fwd_streams != NULL) {
862 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
863 if (fwd_streams[sm_id] == NULL)
865 rte_free(fwd_streams[sm_id]);
866 fwd_streams[sm_id] = NULL;
868 rte_free(fwd_streams);
873 nb_fwd_streams = nb_fwd_streams_new;
874 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
875 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
876 if (fwd_streams == NULL)
877 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
878 "failed\n", nb_fwd_streams);
880 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
881 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
882 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
883 if (fwd_streams[sm_id] == NULL)
884 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
891 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
893 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
895 unsigned int total_burst;
896 unsigned int nb_burst;
897 unsigned int burst_stats[3];
898 uint16_t pktnb_stats[3];
900 int burst_percent[3];
903 * First compute the total number of packet bursts and the
904 * two highest numbers of bursts of the same number of packets.
907 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
908 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
909 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
910 nb_burst = pbs->pkt_burst_spread[nb_pkt];
913 total_burst += nb_burst;
914 if (nb_burst > burst_stats[0]) {
915 burst_stats[1] = burst_stats[0];
916 pktnb_stats[1] = pktnb_stats[0];
917 burst_stats[0] = nb_burst;
918 pktnb_stats[0] = nb_pkt;
921 if (total_burst == 0)
923 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
924 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
925 burst_percent[0], (int) pktnb_stats[0]);
926 if (burst_stats[0] == total_burst) {
930 if (burst_stats[0] + burst_stats[1] == total_burst) {
931 printf(" + %d%% of %d pkts]\n",
932 100 - burst_percent[0], pktnb_stats[1]);
935 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
936 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
937 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
938 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
941 printf(" + %d%% of %d pkts + %d%% of others]\n",
942 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
944 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
947 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
949 struct rte_port *port;
952 static const char *fwd_stats_border = "----------------------";
954 port = &ports[port_id];
955 printf("\n %s Forward statistics for port %-2d %s\n",
956 fwd_stats_border, port_id, fwd_stats_border);
958 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
959 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
961 stats->ipackets, stats->imissed,
962 (uint64_t) (stats->ipackets + stats->imissed));
964 if (cur_fwd_eng == &csum_fwd_engine)
965 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
966 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
967 if ((stats->ierrors + stats->rx_nombuf) > 0) {
968 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
969 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
972 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
974 stats->opackets, port->tx_dropped,
975 (uint64_t) (stats->opackets + port->tx_dropped));
978 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
980 stats->ipackets, stats->imissed,
981 (uint64_t) (stats->ipackets + stats->imissed));
983 if (cur_fwd_eng == &csum_fwd_engine)
984 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
985 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
986 if ((stats->ierrors + stats->rx_nombuf) > 0) {
987 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
988 printf(" RX-nombufs: %14"PRIu64"\n",
992 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
994 stats->opackets, port->tx_dropped,
995 (uint64_t) (stats->opackets + port->tx_dropped));
998 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1000 pkt_burst_stats_display("RX",
1001 &port->rx_stream->rx_burst_stats);
1002 if (port->tx_stream)
1003 pkt_burst_stats_display("TX",
1004 &port->tx_stream->tx_burst_stats);
1007 if (port->rx_queue_stats_mapping_enabled) {
1009 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1010 printf(" Stats reg %2d RX-packets:%14"PRIu64
1011 " RX-errors:%14"PRIu64
1012 " RX-bytes:%14"PRIu64"\n",
1013 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1017 if (port->tx_queue_stats_mapping_enabled) {
1018 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1019 printf(" Stats reg %2d TX-packets:%14"PRIu64
1020 " TX-bytes:%14"PRIu64"\n",
1021 i, stats->q_opackets[i], stats->q_obytes[i]);
1025 printf(" %s--------------------------------%s\n",
1026 fwd_stats_border, fwd_stats_border);
1030 fwd_stream_stats_display(streamid_t stream_id)
1032 struct fwd_stream *fs;
1033 static const char *fwd_top_stats_border = "-------";
1035 fs = fwd_streams[stream_id];
1036 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1037 (fs->fwd_dropped == 0))
1039 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1040 "TX Port=%2d/Queue=%2d %s\n",
1041 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1042 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1043 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1044 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1046 /* if checksum mode */
1047 if (cur_fwd_eng == &csum_fwd_engine) {
1048 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
1049 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1052 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1053 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1054 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1059 flush_fwd_rx_queues(void)
1061 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1068 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1069 uint64_t timer_period;
1071 /* convert to number of cycles */
1072 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1074 for (j = 0; j < 2; j++) {
1075 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1076 for (rxq = 0; rxq < nb_rxq; rxq++) {
1077 port_id = fwd_ports_ids[rxp];
1079 * testpmd can get stuck in the do-while loop below
1080 * if rte_eth_rx_burst() always returns nonzero
1081 * packets, so a timer is added to exit the loop
1082 * after a 1-second timeout.
1084 prev_tsc = rte_rdtsc();
1086 nb_rx = rte_eth_rx_burst(port_id, rxq,
1087 pkts_burst, MAX_PKT_BURST);
1088 for (i = 0; i < nb_rx; i++)
1089 rte_pktmbuf_free(pkts_burst[i]);
1091 cur_tsc = rte_rdtsc();
1092 diff_tsc = cur_tsc - prev_tsc;
1093 timer_tsc += diff_tsc;
1094 } while ((nb_rx > 0) &&
1095 (timer_tsc < timer_period));
1099 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1104 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1106 struct fwd_stream **fsm;
1109 #ifdef RTE_LIBRTE_BITRATE
1110 uint64_t tics_per_1sec;
1111 uint64_t tics_datum;
1112 uint64_t tics_current;
1113 uint8_t idx_port, cnt_ports;
1115 cnt_ports = rte_eth_dev_count();
1116 tics_datum = rte_rdtsc();
1117 tics_per_1sec = rte_get_timer_hz();
1119 fsm = &fwd_streams[fc->stream_idx];
1120 nb_fs = fc->stream_nb;
1122 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1123 (*pkt_fwd)(fsm[sm_id]);
1124 #ifdef RTE_LIBRTE_BITRATE
1125 if (bitrate_enabled != 0 &&
1126 bitrate_lcore_id == rte_lcore_id()) {
1127 tics_current = rte_rdtsc();
1128 if (tics_current - tics_datum >= tics_per_1sec) {
1129 /* Periodic bitrate calculation */
1131 idx_port < cnt_ports;
1133 rte_stats_bitrate_calc(bitrate_data,
1135 tics_datum = tics_current;
1139 #ifdef RTE_LIBRTE_LATENCY_STATS
1140 if (latencystats_enabled != 0 &&
1141 latencystats_lcore_id == rte_lcore_id())
1142 rte_latencystats_update();
1145 } while (! fc->stopped);
1149 start_pkt_forward_on_core(void *fwd_arg)
1151 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1152 cur_fwd_config.fwd_eng->packet_fwd);
1157 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1158 * Used to start communication flows in network loopback test configurations.
1161 run_one_txonly_burst_on_core(void *fwd_arg)
1163 struct fwd_lcore *fwd_lc;
1164 struct fwd_lcore tmp_lcore;
1166 fwd_lc = (struct fwd_lcore *) fwd_arg;
1167 tmp_lcore = *fwd_lc;
1168 tmp_lcore.stopped = 1;
1169 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1174 * Launch packet forwarding:
1175 * - Set up the per-port forwarding context.
1176 * - Launch logical cores with their forwarding configuration.
1179 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1181 port_fwd_begin_t port_fwd_begin;
1186 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1187 if (port_fwd_begin != NULL) {
1188 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1189 (*port_fwd_begin)(fwd_ports_ids[i]);
1191 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1192 lc_id = fwd_lcores_cpuids[i];
1193 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1194 fwd_lcores[i]->stopped = 0;
1195 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1196 fwd_lcores[i], lc_id);
1198 printf("launch lcore %u failed - diag=%d\n",
1205 * Launch packet forwarding configuration.
1208 start_packet_forwarding(int with_tx_first)
1210 port_fwd_begin_t port_fwd_begin;
1211 port_fwd_end_t port_fwd_end;
1212 struct rte_port *port;
1217 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1218 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1220 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1221 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1223 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1224 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1225 (!nb_rxq || !nb_txq))
1226 rte_exit(EXIT_FAILURE,
1227 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1228 cur_fwd_eng->fwd_mode_name);
1230 if (all_ports_started() == 0) {
1231 printf("Not all ports were started\n");
1234 if (test_done == 0) {
1235 printf("Packet forwarding already started\n");
1239 if (init_fwd_streams() < 0) {
1240 printf("Fail from init_fwd_streams()\n");
1245 for (i = 0; i < nb_fwd_ports; i++) {
1246 pt_id = fwd_ports_ids[i];
1247 port = &ports[pt_id];
1248 if (!port->dcb_flag) {
1249 printf("In DCB mode, all forwarding ports must "
1250 "be configured in this mode.\n");
1254 if (nb_fwd_lcores == 1) {
1255 printf("In DCB mode,the nb forwarding cores "
1256 "should be larger than 1.\n");
1263 flush_fwd_rx_queues();
1266 pkt_fwd_config_display(&cur_fwd_config);
1267 rxtx_config_display();
1269 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1270 pt_id = fwd_ports_ids[i];
1271 port = &ports[pt_id];
1272 rte_eth_stats_get(pt_id, &port->stats);
1273 port->tx_dropped = 0;
1275 map_port_queue_stats_mapping_registers(pt_id, port);
1277 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1278 fwd_streams[sm_id]->rx_packets = 0;
1279 fwd_streams[sm_id]->tx_packets = 0;
1280 fwd_streams[sm_id]->fwd_dropped = 0;
1281 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1282 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1284 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1285 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1286 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1287 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1288 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1290 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1291 fwd_streams[sm_id]->core_cycles = 0;
1294 if (with_tx_first) {
1295 port_fwd_begin = tx_only_engine.port_fwd_begin;
1296 if (port_fwd_begin != NULL) {
1297 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1298 (*port_fwd_begin)(fwd_ports_ids[i]);
1300 while (with_tx_first--) {
1301 launch_packet_forwarding(
1302 run_one_txonly_burst_on_core);
1303 rte_eal_mp_wait_lcore();
1305 port_fwd_end = tx_only_engine.port_fwd_end;
1306 if (port_fwd_end != NULL) {
1307 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1308 (*port_fwd_end)(fwd_ports_ids[i]);
1311 launch_packet_forwarding(start_pkt_forward_on_core);
1315 stop_packet_forwarding(void)
1317 struct rte_eth_stats stats;
1318 struct rte_port *port;
1319 port_fwd_end_t port_fwd_end;
1324 uint64_t total_recv;
1325 uint64_t total_xmit;
1326 uint64_t total_rx_dropped;
1327 uint64_t total_tx_dropped;
1328 uint64_t total_rx_nombuf;
1329 uint64_t tx_dropped;
1330 uint64_t rx_bad_ip_csum;
1331 uint64_t rx_bad_l4_csum;
1332 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1333 uint64_t fwd_cycles;
1336 static const char *acc_stats_border = "+++++++++++++++";
1339 printf("Packet forwarding not started\n");
1342 printf("Telling cores to stop...");
1343 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1344 fwd_lcores[lc_id]->stopped = 1;
1345 printf("\nWaiting for lcores to finish...\n");
1346 rte_eal_mp_wait_lcore();
1347 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1348 if (port_fwd_end != NULL) {
1349 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1350 pt_id = fwd_ports_ids[i];
1351 (*port_fwd_end)(pt_id);
1354 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1357 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1358 if (cur_fwd_config.nb_fwd_streams >
1359 cur_fwd_config.nb_fwd_ports) {
1360 fwd_stream_stats_display(sm_id);
1361 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1362 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1364 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1366 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1369 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1370 tx_dropped = (uint64_t) (tx_dropped +
1371 fwd_streams[sm_id]->fwd_dropped);
1372 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1375 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1376 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1377 fwd_streams[sm_id]->rx_bad_ip_csum);
1378 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1382 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1383 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1384 fwd_streams[sm_id]->rx_bad_l4_csum);
1385 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1388 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1389 fwd_cycles = (uint64_t) (fwd_cycles +
1390 fwd_streams[sm_id]->core_cycles);
1395 total_rx_dropped = 0;
1396 total_tx_dropped = 0;
1397 total_rx_nombuf = 0;
1398 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1399 pt_id = fwd_ports_ids[i];
1401 port = &ports[pt_id];
1402 rte_eth_stats_get(pt_id, &stats);
1403 stats.ipackets -= port->stats.ipackets;
1404 port->stats.ipackets = 0;
1405 stats.opackets -= port->stats.opackets;
1406 port->stats.opackets = 0;
1407 stats.ibytes -= port->stats.ibytes;
1408 port->stats.ibytes = 0;
1409 stats.obytes -= port->stats.obytes;
1410 port->stats.obytes = 0;
1411 stats.imissed -= port->stats.imissed;
1412 port->stats.imissed = 0;
1413 stats.oerrors -= port->stats.oerrors;
1414 port->stats.oerrors = 0;
1415 stats.rx_nombuf -= port->stats.rx_nombuf;
1416 port->stats.rx_nombuf = 0;
1418 total_recv += stats.ipackets;
1419 total_xmit += stats.opackets;
1420 total_rx_dropped += stats.imissed;
1421 total_tx_dropped += port->tx_dropped;
1422 total_rx_nombuf += stats.rx_nombuf;
1424 fwd_port_stats_display(pt_id, &stats);
1427 printf("\n %s Accumulated forward statistics for all ports"
1429 acc_stats_border, acc_stats_border);
1430 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1432 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1434 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1435 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1436 if (total_rx_nombuf > 0)
1437 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1438 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1440 acc_stats_border, acc_stats_border);
1441 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1443 printf("\n CPU cycles/packet=%u (total cycles="
1444 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1445 (unsigned int)(fwd_cycles / total_recv),
1446 fwd_cycles, total_recv);
1448 printf("\nDone.\n");
1453 dev_set_link_up(portid_t pid)
1455 if (rte_eth_dev_set_link_up(pid) < 0)
1456 printf("\nSet link up fail.\n");
1460 dev_set_link_down(portid_t pid)
1462 if (rte_eth_dev_set_link_down(pid) < 0)
1463 printf("\nSet link down fail.\n");
1467 all_ports_started(void)
1470 struct rte_port *port;
1472 RTE_ETH_FOREACH_DEV(pi) {
1474 /* Check if there is a port which is not started */
1475 if ((port->port_status != RTE_PORT_STARTED) &&
1476 (port->slave_flag == 0))
1480 /* All ports are started */
1485 port_is_stopped(portid_t port_id)
1487 struct rte_port *port = &ports[port_id];
1489 if ((port->port_status != RTE_PORT_STOPPED) &&
1490 (port->slave_flag == 0))
1496 all_ports_stopped(void)
1500 RTE_ETH_FOREACH_DEV(pi) {
1501 if (!port_is_stopped(pi))
1509 port_is_started(portid_t port_id)
1511 if (port_id_is_invalid(port_id, ENABLED_WARN))
1514 if (ports[port_id].port_status != RTE_PORT_STARTED)
1521 port_is_closed(portid_t port_id)
1523 if (port_id_is_invalid(port_id, ENABLED_WARN))
1526 if (ports[port_id].port_status != RTE_PORT_CLOSED)
1533 start_port(portid_t pid)
1535 int diag, need_check_link_status = -1;
1538 struct rte_port *port;
1539 struct ether_addr mac_addr;
1540 enum rte_eth_event_type event_type;
1542 if (port_id_is_invalid(pid, ENABLED_WARN))
1547 RTE_ETH_FOREACH_DEV(pi) {
1548 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1551 need_check_link_status = 0;
1553 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1554 RTE_PORT_HANDLING) == 0) {
1555 printf("Port %d is now not stopped\n", pi);
1559 if (port->need_reconfig > 0) {
1560 port->need_reconfig = 0;
1562 if (flow_isolate_all) {
1563 int ret = port_flow_isolate(pi, 1);
1565 printf("Failed to apply isolated"
1566 " mode on port %d\n", pi);
1571 printf("Configuring Port %d (socket %u)\n", pi,
1573 /* configure port */
1574 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1577 if (rte_atomic16_cmpset(&(port->port_status),
1578 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1579 printf("Port %d can not be set back "
1580 "to stopped\n", pi);
1581 printf("Fail to configure port %d\n", pi);
1582 /* try to reconfigure port next time */
1583 port->need_reconfig = 1;
1587 if (port->need_reconfig_queues > 0) {
1588 port->need_reconfig_queues = 0;
1589 port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
1590 /* Apply Tx offloads configuration */
1591 port->tx_conf.offloads = port->dev_conf.txmode.offloads;
1592 /* setup tx queues */
1593 for (qi = 0; qi < nb_txq; qi++) {
1594 if ((numa_support) &&
1595 (txring_numa[pi] != NUMA_NO_CONFIG))
1596 diag = rte_eth_tx_queue_setup(pi, qi,
1597 nb_txd, txring_numa[pi],
1600 diag = rte_eth_tx_queue_setup(pi, qi,
1601 nb_txd, port->socket_id,
1607 /* Failed to set up TX queue, return */
1608 if (rte_atomic16_cmpset(&(port->port_status),
1610 RTE_PORT_STOPPED) == 0)
1611 printf("Port %d can not be set back "
1612 "to stopped\n", pi);
1613 printf("Fail to configure port %d tx queues\n", pi);
1614 /* try to reconfigure queues next time */
1615 port->need_reconfig_queues = 1;
1618 /* Apply Rx offloads configuration */
1619 port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1620 /* setup rx queues */
1621 for (qi = 0; qi < nb_rxq; qi++) {
1622 if ((numa_support) &&
1623 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1624 struct rte_mempool * mp =
1625 mbuf_pool_find(rxring_numa[pi]);
1627 printf("Failed to setup RX queue:"
1628 "No mempool allocation"
1629 " on the socket %d\n",
1634 diag = rte_eth_rx_queue_setup(pi, qi,
1635 nb_rxd, rxring_numa[pi],
1636 &(port->rx_conf), mp);
1638 struct rte_mempool *mp =
1639 mbuf_pool_find(port->socket_id);
1641 printf("Failed to setup RX queue:"
1642 "No mempool allocation"
1643 " on the socket %d\n",
1647 diag = rte_eth_rx_queue_setup(pi, qi,
1648 nb_rxd, port->socket_id,
1649 &(port->rx_conf), mp);
1654 /* Failed to set up RX queue, return */
1655 if (rte_atomic16_cmpset(&(port->port_status),
1657 RTE_PORT_STOPPED) == 0)
1658 printf("Port %d can not be set back "
1659 "to stopped\n", pi);
1660 printf("Fail to configure port %d rx queues\n", pi);
1661 /* try to reconfigure queues next time */
1662 port->need_reconfig_queues = 1;
1668 if (rte_eth_dev_start(pi) < 0) {
1669 printf("Fail to start port %d\n", pi);
1671 /* Fail to setup rx queue, return */
1672 if (rte_atomic16_cmpset(&(port->port_status),
1673 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1674 printf("Port %d can not be set back to "
1679 if (rte_atomic16_cmpset(&(port->port_status),
1680 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1681 printf("Port %d can not be set into started\n", pi);
1683 rte_eth_macaddr_get(pi, &mac_addr);
1684 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1685 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1686 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1687 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1689 /* At least one port started; need to check link status */
1690 need_check_link_status = 1;
1693 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1694 event_type < RTE_ETH_EVENT_MAX;
1696 diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
1701 printf("Failed to setup even callback for event %d\n",
1707 if (need_check_link_status == 1 && !no_link_check)
1708 check_all_ports_link_status(RTE_PORT_ALL);
1709 else if (need_check_link_status == 0)
1710 printf("Please stop the ports first\n");
1717 stop_port(portid_t pid)
1720 struct rte_port *port;
1721 int need_check_link_status = 0;
1728 if (port_id_is_invalid(pid, ENABLED_WARN))
1731 printf("Stopping ports...\n");
1733 RTE_ETH_FOREACH_DEV(pi) {
1734 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1737 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1738 printf("Please remove port %d from forwarding configuration.\n", pi);
1742 if (port_is_bonding_slave(pi)) {
1743 printf("Please remove port %d from bonded device.\n", pi);
1748 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1749 RTE_PORT_HANDLING) == 0)
1752 rte_eth_dev_stop(pi);
1754 if (rte_atomic16_cmpset(&(port->port_status),
1755 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1756 printf("Port %d can not be set into stopped\n", pi);
1757 need_check_link_status = 1;
1759 if (need_check_link_status && !no_link_check)
1760 check_all_ports_link_status(RTE_PORT_ALL);
1766 close_port(portid_t pid)
1769 struct rte_port *port;
1771 if (port_id_is_invalid(pid, ENABLED_WARN))
1774 printf("Closing ports...\n");
1776 RTE_ETH_FOREACH_DEV(pi) {
1777 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1780 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1781 printf("Please remove port %d from forwarding configuration.\n", pi);
1785 if (port_is_bonding_slave(pi)) {
1786 printf("Please remove port %d from bonded device.\n", pi);
1791 if (rte_atomic16_cmpset(&(port->port_status),
1792 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1793 printf("Port %d is already closed\n", pi);
1797 if (rte_atomic16_cmpset(&(port->port_status),
1798 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1799 printf("Port %d is now not stopped\n", pi);
1803 if (port->flow_list)
1804 port_flow_flush(pi);
1805 rte_eth_dev_close(pi);
1807 if (rte_atomic16_cmpset(&(port->port_status),
1808 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1809 printf("Port %d cannot be set to closed\n", pi);
1816 reset_port(portid_t pid)
1820 struct rte_port *port;
1822 if (port_id_is_invalid(pid, ENABLED_WARN))
1825 printf("Resetting ports...\n");
1827 RTE_ETH_FOREACH_DEV(pi) {
1828 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1831 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1832 printf("Please remove port %d from forwarding "
1833 "configuration.\n", pi);
1837 if (port_is_bonding_slave(pi)) {
1838 printf("Please remove port %d from bonded device.\n",
1843 diag = rte_eth_dev_reset(pi);
1846 port->need_reconfig = 1;
1847 port->need_reconfig_queues = 1;
1849 printf("Failed to reset port %d. diag=%d\n", pi, diag);
1857 attach_port(char *identifier)
1860 unsigned int socket_id;
1862 printf("Attaching a new port...\n");
1864 if (identifier == NULL) {
1865 printf("Invalid parameters are specified\n");
1869 if (rte_eth_dev_attach(identifier, &pi))
1872 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1873 /* if socket_id is invalid, set to 0 */
1874 if (check_socket_id(socket_id) < 0)
1876 reconfig(pi, socket_id);
1877 rte_eth_promiscuous_enable(pi);
1879 nb_ports = rte_eth_dev_count();
1881 ports[pi].port_status = RTE_PORT_STOPPED;
1883 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1888 detach_port(portid_t port_id)
1890 char name[RTE_ETH_NAME_MAX_LEN];
1892 printf("Detaching a port...\n");
1894 if (!port_is_closed(port_id)) {
1895 printf("Please close port first\n");
1899 if (ports[port_id].flow_list)
1900 port_flow_flush(port_id);
1902 if (rte_eth_dev_detach(port_id, name)) {
1903 TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1907 nb_ports = rte_eth_dev_count();
1909 printf("Port '%s' is detached. Now total ports is %d\n",
1921 stop_packet_forwarding();
1923 if (ports != NULL) {
1925 RTE_ETH_FOREACH_DEV(pt_id) {
1926 printf("\nShutting down port %d...\n", pt_id);
1932 printf("\nBye...\n");
1935 typedef void (*cmd_func_t)(void);
1936 struct pmd_test_command {
1937 const char *cmd_name;
1938 cmd_func_t cmd_func;
1941 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1943 /* Check the link status of all ports in up to 9s, and print the final status */
1945 check_all_ports_link_status(uint32_t port_mask)
1947 #define CHECK_INTERVAL 100 /* 100ms */
1948 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1950 uint8_t count, all_ports_up, print_flag = 0;
1951 struct rte_eth_link link;
1953 printf("Checking link statuses...\n");
1955 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1957 RTE_ETH_FOREACH_DEV(portid) {
1958 if ((port_mask & (1 << portid)) == 0)
1960 memset(&link, 0, sizeof(link));
1961 rte_eth_link_get_nowait(portid, &link);
1962 /* print link status if flag set */
1963 if (print_flag == 1) {
1964 if (link.link_status)
1966 "Port%d Link Up. speed %u Mbps- %s\n",
1967 portid, link.link_speed,
1968 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1969 ("full-duplex") : ("half-duplex\n"));
1971 printf("Port %d Link Down\n", portid);
1974 /* clear all_ports_up flag if any link down */
1975 if (link.link_status == ETH_LINK_DOWN) {
1980 /* After printing the final link status, get out */
1981 if (print_flag == 1)
1984 if (all_ports_up == 0) {
1986 rte_delay_ms(CHECK_INTERVAL);
1989 /* set the print_flag if all ports up or timeout */
1990 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2000 rmv_event_callback(void *arg)
2002 struct rte_eth_dev *dev;
2003 portid_t port_id = (intptr_t)arg;
2005 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2006 dev = &rte_eth_devices[port_id];
2009 close_port(port_id);
2010 printf("removing device %s\n", dev->device->name);
2011 if (rte_eal_dev_detach(dev->device))
2012 TESTPMD_LOG(ERR, "Failed to detach device %s\n",
2016 /* This function is used by the interrupt thread */
2018 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2021 static const char * const event_desc[] = {
2022 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2023 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2024 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2025 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2026 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2027 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2028 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2029 [RTE_ETH_EVENT_NEW] = "device probed",
2030 [RTE_ETH_EVENT_DESTROY] = "device released",
2031 [RTE_ETH_EVENT_MAX] = NULL,
2034 RTE_SET_USED(param);
2035 RTE_SET_USED(ret_param);
2037 if (type >= RTE_ETH_EVENT_MAX) {
2038 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2039 port_id, __func__, type);
2041 } else if (event_print_mask & (UINT32_C(1) << type)) {
2042 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2047 if (port_id_is_invalid(port_id, DISABLED_WARN))
2051 case RTE_ETH_EVENT_INTR_RMV:
2052 if (rte_eal_alarm_set(100000,
2053 rmv_event_callback, (void *)(intptr_t)port_id))
2054 fprintf(stderr, "Could not set up deferred device removal\n");
2063 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2067 uint8_t mapping_found = 0;
2069 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2070 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2071 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2072 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2073 tx_queue_stats_mappings[i].queue_id,
2074 tx_queue_stats_mappings[i].stats_counter_id);
2081 port->tx_queue_stats_mapping_enabled = 1;
2086 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2090 uint8_t mapping_found = 0;
2092 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2093 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2094 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2095 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2096 rx_queue_stats_mappings[i].queue_id,
2097 rx_queue_stats_mappings[i].stats_counter_id);
2104 port->rx_queue_stats_mapping_enabled = 1;
2109 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2113 diag = set_tx_queue_stats_mapping_registers(pi, port);
2115 if (diag == -ENOTSUP) {
2116 port->tx_queue_stats_mapping_enabled = 0;
2117 printf("TX queue stats mapping not supported port id=%d\n", pi);
2120 rte_exit(EXIT_FAILURE,
2121 "set_tx_queue_stats_mapping_registers "
2122 "failed for port id=%d diag=%d\n",
2126 diag = set_rx_queue_stats_mapping_registers(pi, port);
2128 if (diag == -ENOTSUP) {
2129 port->rx_queue_stats_mapping_enabled = 0;
2130 printf("RX queue stats mapping not supported port id=%d\n", pi);
2133 rte_exit(EXIT_FAILURE,
2134 "set_rx_queue_stats_mapping_registers "
2135 "failed for port id=%d diag=%d\n",
2141 rxtx_port_config(struct rte_port *port)
2143 port->rx_conf = port->dev_info.default_rxconf;
2144 port->tx_conf = port->dev_info.default_txconf;
2146 /* Check if any RX/TX parameters have been passed */
2147 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2148 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2150 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2151 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2153 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2154 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2156 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2157 port->rx_conf.rx_free_thresh = rx_free_thresh;
2159 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2160 port->rx_conf.rx_drop_en = rx_drop_en;
2162 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2163 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2165 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2166 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2168 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2169 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2171 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2172 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2174 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2175 port->tx_conf.tx_free_thresh = tx_free_thresh;
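/*
 * Example: the threshold globals above are filled from command-line options
 * (option names assumed from the usual testpmd parameters), so a run such as
 *
 *	testpmd -- --txfreet=32 --txrst=32
 *
 * overrides only tx_free_thresh and tx_rs_thresh and leaves the other
 * fields at the PMD defaults taken from dev_info.
 */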
2179 init_port_config(void)
2182 struct rte_port *port;
2184 RTE_ETH_FOREACH_DEV(pid) {
2186 port->dev_conf.fdir_conf = fdir_conf;
2188 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2189 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2191 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2192 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2195 if (port->dcb_flag == 0) {
2196 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2197 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2199 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2202 rxtx_port_config(port);
2204 rte_eth_macaddr_get(pid, &port->eth_addr);
2206 map_port_queue_stats_mapping_registers(pid, port);
2207 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2208 rte_pmd_ixgbe_bypass_init(pid);
2211 if (lsc_interrupt &&
2212 (rte_eth_devices[pid].data->dev_flags &
2213 RTE_ETH_DEV_INTR_LSC))
2214 port->dev_conf.intr_conf.lsc = 1;
2215 if (rmv_interrupt &&
2216 (rte_eth_devices[pid].data->dev_flags &
2217 RTE_ETH_DEV_INTR_RMV))
2218 port->dev_conf.intr_conf.rmv = 1;
2220 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2221 /* Detect softnic port */
2222 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2223 port->softnic_enable = 1;
2224 memset(&port->softport, 0, sizeof(struct softnic_port));
2226 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2227 port->softport.tm_flag = 1;
2233 void set_port_slave_flag(portid_t slave_pid)
2235 struct rte_port *port;
2237 port = &ports[slave_pid];
2238 port->slave_flag = 1;
2241 void clear_port_slave_flag(portid_t slave_pid)
2243 struct rte_port *port;
2245 port = &ports[slave_pid];
2246 port->slave_flag = 0;
2249 uint8_t port_is_bonding_slave(portid_t slave_pid)
2251 struct rte_port *port;
2253 port = &ports[slave_pid];
2254 return port->slave_flag;
2257 const uint16_t vlan_tags[] = {
2258 0, 1, 2, 3, 4, 5, 6, 7,
2259 8, 9, 10, 11, 12, 13, 14, 15,
2260 16, 17, 18, 19, 20, 21, 22, 23,
2261 24, 25, 26, 27, 28, 29, 30, 31
2265 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2266 enum dcb_mode_enable dcb_mode,
2267 enum rte_eth_nb_tcs num_tcs,
2273 * Build the correct configuration for DCB+VT based on the VLAN tags array
2274 * given above and the number of traffic classes available for use.
2276 if (dcb_mode == DCB_VT_ENABLED) {
2277 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2278 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2279 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2280 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2282 /* VMDQ+DCB RX and TX configurations */
2283 vmdq_rx_conf->enable_default_pool = 0;
2284 vmdq_rx_conf->default_pool = 0;
2285 vmdq_rx_conf->nb_queue_pools =
2286 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2287 vmdq_tx_conf->nb_queue_pools =
2288 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2290 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2291 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2292 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2293 vmdq_rx_conf->pool_map[i].pools =
2294 1 << (i % vmdq_rx_conf->nb_queue_pools);
2296 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2297 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2298 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2301 /* set DCB mode of RX and TX of multiple queues */
2302 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2303 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
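/*
 * Worked example: with num_tcs == ETH_4_TCS there are 32 queue pools;
 * vlan_tags[5] == 5 maps to pool mask (1 << (5 % 32)) and user
 * priority i is assigned to traffic class (i % 4).
 */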
2305 struct rte_eth_dcb_rx_conf *rx_conf =
2306 ð_conf->rx_adv_conf.dcb_rx_conf;
2307 struct rte_eth_dcb_tx_conf *tx_conf =
2308 ð_conf->tx_adv_conf.dcb_tx_conf;
2310 rx_conf->nb_tcs = num_tcs;
2311 tx_conf->nb_tcs = num_tcs;
2313 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2314 rx_conf->dcb_tc[i] = i % num_tcs;
2315 tx_conf->dcb_tc[i] = i % num_tcs;
2317 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2318 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2319 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2323 eth_conf->dcb_capability_en =
2324 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2326 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2332 init_port_dcb_config(portid_t pid,
2333 enum dcb_mode_enable dcb_mode,
2334 enum rte_eth_nb_tcs num_tcs,
2337 struct rte_eth_conf port_conf;
2338 struct rte_port *rte_port;
2342 rte_port = &ports[pid];
2344 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2345 /* Enter DCB configuration status */
2348 port_conf.rxmode = rte_port->dev_conf.rxmode;
2349 port_conf.txmode = rte_port->dev_conf.txmode;
2351 /* Set configuration of DCB in VT mode and DCB in non-VT mode */
2352 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2355 port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2358 * Write the configuration into the device.
2359 * Set the numbers of RX & TX queues to 0, so
2360 * the RX & TX queues will not be set up.
2362 rte_eth_dev_configure(pid, 0, 0, &port_conf);
2364 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2366 /* If dev_info.vmdq_pool_base is greater than 0,
2367 * the queue IDs of the VMDq pools start after the PF queues.
2369 if (dcb_mode == DCB_VT_ENABLED &&
2370 rte_port->dev_info.vmdq_pool_base > 0) {
2371 printf("VMDQ_DCB multi-queue mode is nonsensical"
2372 " for port %d.", pid);
2376 /* Assume the ports in testpmd have the same DCB capability
2377 * and the same number of rxq and txq in DCB mode
2379 if (dcb_mode == DCB_VT_ENABLED) {
2380 if (rte_port->dev_info.max_vfs > 0) {
2381 nb_rxq = rte_port->dev_info.nb_rx_queues;
2382 nb_txq = rte_port->dev_info.nb_tx_queues;
2384 nb_rxq = rte_port->dev_info.max_rx_queues;
2385 nb_txq = rte_port->dev_info.max_tx_queues;
2388 /* If VT is disabled, use all PF queues */
2389 if (rte_port->dev_info.vmdq_pool_base == 0) {
2390 nb_rxq = rte_port->dev_info.max_rx_queues;
2391 nb_txq = rte_port->dev_info.max_tx_queues;
2393 nb_rxq = (queueid_t)num_tcs;
2394 nb_txq = (queueid_t)num_tcs;
2398 rx_free_thresh = 64;
2400 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2402 rxtx_port_config(rte_port);
2404 rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2405 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2406 rx_vft_set(pid, vlan_tags[i], 1);
2408 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2409 map_port_queue_stats_mapping_registers(pid, rte_port);
2411 rte_port->dcb_flag = 1;
2419 /* Configuration of Ethernet ports. */
2420 ports = rte_zmalloc("testpmd: ports",
2421 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2422 RTE_CACHE_LINE_SIZE);
2423 if (ports == NULL) {
2424 rte_exit(EXIT_FAILURE,
2425 "rte_zmalloc(%d struct rte_port) failed\n",
2441 const char clr[] = { 27, '[', '2', 'J', '\0' };
2442 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2444 /* Clear screen and move to top left */
2445 printf("%s%s", clr, top_left);
2447 printf("\nPort statistics ====================================");
2448 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2449 nic_stats_display(fwd_ports_ids[i]);
2453 signal_handler(int signum)
2455 if (signum == SIGINT || signum == SIGTERM) {
2456 printf("\nSignal %d received, preparing to exit...\n",
2458 #ifdef RTE_LIBRTE_PDUMP
2459 /* uninitialize packet capture framework */
2462 #ifdef RTE_LIBRTE_LATENCY_STATS
2463 rte_latencystats_uninit();
2466 /* Set flag to indicate forced termination. */
2468 /* exit with the expected status */
2469 signal(signum, SIG_DFL);
2470 kill(getpid(), signum);
2475 main(int argc, char** argv)
2480 signal(SIGINT, signal_handler);
2481 signal(SIGTERM, signal_handler);
2483 diag = rte_eal_init(argc, argv);
2485 rte_panic("Cannot init EAL\n");
2487 testpmd_logtype = rte_log_register("testpmd");
2488 if (testpmd_logtype < 0)
2489 rte_panic("Cannot register log type");
2490 rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2492 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2493 TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2497 #ifdef RTE_LIBRTE_PDUMP
2498 /* initialize packet capture framework */
2499 rte_pdump_init(NULL);
2502 nb_ports = (portid_t) rte_eth_dev_count();
2504 TESTPMD_LOG(WARNING, "No probed Ethernet devices\n");
2506 /* allocate port structures, and init them */
2509 set_def_fwd_config();
2511 rte_panic("Empty set of forwarding logical cores - check the "
2512 "core mask supplied in the command parameters\n");
2514 /* Bitrate/latency stats disabled by default */
2515 #ifdef RTE_LIBRTE_BITRATE
2516 bitrate_enabled = 0;
2518 #ifdef RTE_LIBRTE_LATENCY_STATS
2519 latencystats_enabled = 0;
2525 launch_args_parse(argc, argv);
2527 if (tx_first && interactive)
2528 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2529 "interactive mode.\n");
2531 if (tx_first && lsc_interrupt) {
2532 printf("Warning: lsc_interrupt needs to be off when "
2533 " using tx_first. Disabling.\n");
2537 if (!nb_rxq && !nb_txq)
2538 printf("Warning: Either rx or tx queues should be non-zero\n");
2540 if (nb_rxq > 1 && nb_rxq > nb_txq)
2541 printf("Warning: nb_rxq=%d enables RSS configuration, "
2542 "but nb_txq=%d will prevent to fully test it.\n",
2546 if (start_port(RTE_PORT_ALL) != 0)
2547 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2549 /* set all ports to promiscuous mode by default */
2550 RTE_ETH_FOREACH_DEV(port_id)
2551 rte_eth_promiscuous_enable(port_id);
2553 /* Init metrics library */
2554 rte_metrics_init(rte_socket_id());
2556 #ifdef RTE_LIBRTE_LATENCY_STATS
2557 if (latencystats_enabled != 0) {
2558 int ret = rte_latencystats_init(1, NULL);
2560 printf("Warning: latencystats init()"
2561 " returned error %d\n", ret);
2562 printf("Latencystats running on lcore %d\n",
2563 latencystats_lcore_id);
2567 /* Setup bitrate stats */
2568 #ifdef RTE_LIBRTE_BITRATE
2569 if (bitrate_enabled != 0) {
2570 bitrate_data = rte_stats_bitrate_create();
2571 if (bitrate_data == NULL)
2572 rte_exit(EXIT_FAILURE,
2573 "Could not allocate bitrate data.\n");
2574 rte_stats_bitrate_reg(bitrate_data);
2578 #ifdef RTE_LIBRTE_CMDLINE
2579 if (strlen(cmdline_filename) != 0)
2580 cmdline_read_from_file(cmdline_filename);
2582 if (interactive == 1) {
2584 printf("Start automatic packet forwarding\n");
2585 start_packet_forwarding(0);
2597 printf("No commandline core given, start packet forwarding\n");
2598 start_packet_forwarding(tx_first);
2599 if (stats_period != 0) {
2600 uint64_t prev_time = 0, cur_time, diff_time = 0;
2601 uint64_t timer_period;
2603 /* Convert to number of cycles */
2604 timer_period = stats_period * rte_get_timer_hz();
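/*
 * Example: with --stats-period=5 on a machine whose timer runs at
 * 2.0 GHz, timer_period = 5 * 2e9 = 1e10 cycles between two
 * statistics displays.
 */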
2606 while (f_quit == 0) {
2607 cur_time = rte_get_timer_cycles();
2608 diff_time += cur_time - prev_time;
2610 if (diff_time >= timer_period) {
2612 /* Reset the timer */
2615 /* Sleep to avoid unnecessary checks */
2616 prev_time = cur_time;
2621 printf("Press enter to exit\n");
2622 rc = read(0, &c, 1);