4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <sys/types.h>
45 #include <sys/queue.h>
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_memzone.h>
61 #include <rte_launch.h>
63 #include <rte_alarm.h>
64 #include <rte_per_lcore.h>
65 #include <rte_lcore.h>
66 #include <rte_atomic.h>
67 #include <rte_branch_prediction.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
71 #include <rte_interrupts.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
76 #include <rte_string_fns.h>
77 #ifdef RTE_LIBRTE_IXGBE_PMD
78 #include <rte_pmd_ixgbe.h>
80 #ifdef RTE_LIBRTE_PDUMP
81 #include <rte_pdump.h>
84 #include <rte_metrics.h>
85 #ifdef RTE_LIBRTE_BITRATE
86 #include <rte_bitrate.h>
88 #ifdef RTE_LIBRTE_LATENCY_STATS
89 #include <rte_latencystats.h>
94 uint16_t verbose_level = 0; /**< Silent by default. */
96 /* use master core for command line ? */
97 uint8_t interactive = 0;
98 uint8_t auto_start = 0;
100 char cmdline_filename[PATH_MAX] = {0};
103 * NUMA support configuration.
104 * When set, the NUMA support attempts to dispatch the allocation of the
105 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
106 * probed ports among the CPU sockets 0 and 1.
107 * Otherwise, all memory is allocated from CPU socket 0.
109 uint8_t numa_support = 1; /**< numa enabled by default */
112 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
115 uint8_t socket_num = UMA_NO_CONFIG;
118 * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
123 * Record the Ethernet address of peer target ports to which packets are
125 * Must be instantiated with the ethernet addresses of peer traffic generator
128 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
129 portid_t nb_peer_eth_addrs = 0;
132 * Probed Target Environment.
134 struct rte_port *ports; /**< For all probed ethernet ports. */
135 portid_t nb_ports; /**< Number of probed ethernet ports. */
136 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
137 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
140 * Test Forwarding Configuration.
141 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
142 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
144 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
145 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
146 portid_t nb_cfg_ports; /**< Number of configured ports. */
147 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
149 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
150 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
152 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
153 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
156 * Forwarding engines.
158 struct fwd_engine * fwd_engines[] = {
167 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
169 &softnic_tm_bypass_engine,
171 #ifdef RTE_LIBRTE_IEEE1588
172 &ieee1588_fwd_engine,
177 struct fwd_config cur_fwd_config;
178 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
179 uint32_t retry_enabled;
180 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
181 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
183 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
184 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
185 * specified on command-line. */
186 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
189 * In a container, the process running with the 'stats-period' option cannot
190 * be terminated. Set a flag to exit the stats-period loop when SIGINT/SIGTERM is received.
195 * Configuration of packet segments used by the "txonly" processing engine.
197 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
198 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
199 TXONLY_DEF_PACKET_LEN,
201 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
203 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
204 /**< Split policy for packets to TX. */
206 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
207 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
209 /* current configuration is in DCB or not,0 means it is not in DCB mode */
210 uint8_t dcb_config = 0;
212 /* Whether the dcb is in testing status */
213 uint8_t dcb_test = 0;
216 * Configurable number of RX/TX queues.
218 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
219 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
222 * Configurable number of RX/TX ring descriptors.
224 #define RTE_TEST_RX_DESC_DEFAULT 128
225 #define RTE_TEST_TX_DESC_DEFAULT 512
226 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
227 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
229 #define RTE_PMD_PARAM_UNSET -1
231 * Configurable values of RX and TX ring threshold registers.
234 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
235 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
236 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
238 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
239 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
240 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
243 * Configurable value of RX free threshold.
245 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
248 * Configurable value of RX drop enable.
250 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
253 * Configurable value of TX free threshold.
255 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
258 * Configurable value of TX RS bit threshold.
260 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
263 * Configurable value of TX queue flags.
265 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
268 * Receive Side Scaling (RSS) configuration.
270 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
273 * Port topology configuration
275 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
278 * Avoids to flush all the RX streams before starts forwarding.
280 uint8_t no_flush_rx = 0; /* flush by default */
283 * Flow API isolated mode.
285 uint8_t flow_isolate_all;
288 * Avoids to check link status when starting/stopping a port.
290 uint8_t no_link_check = 0; /* check by default */
293 * Enable link status change notification
295 uint8_t lsc_interrupt = 1; /* enabled by default */
298 * Enable device removal notification.
300 uint8_t rmv_interrupt = 1; /* enabled by default */
303 * Display or mask ether events
304 * Default to all events except VF_MBOX
306 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
307 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
308 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
309 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
310 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
311 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
314 * NIC bypass mode configuration options.
317 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
318 /* The NIC bypass watchdog timeout. */
319 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
323 #ifdef RTE_LIBRTE_LATENCY_STATS
326 * Set when latency stats is enabled in the commandline
328 uint8_t latencystats_enabled;
331 * Lcore ID to serve latency statistics.
333 lcoreid_t latencystats_lcore_id = -1;
338 * Ethernet device configuration.
340 struct rte_eth_rxmode rx_mode = {
341 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
343 .header_split = 0, /**< Header Split disabled. */
344 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
345 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
346 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
347 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
348 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
349 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
350 .hw_timestamp = 0, /**< HW timestamp enabled. */
353 struct rte_fdir_conf fdir_conf = {
354 .mode = RTE_FDIR_MODE_NONE,
355 .pballoc = RTE_FDIR_PBALLOC_64K,
356 .status = RTE_FDIR_REPORT_STATUS,
358 .vlan_tci_mask = 0x0,
360 .src_ip = 0xFFFFFFFF,
361 .dst_ip = 0xFFFFFFFF,
364 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
365 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
367 .src_port_mask = 0xFFFF,
368 .dst_port_mask = 0xFFFF,
369 .mac_addr_byte_mask = 0xFF,
370 .tunnel_type_mask = 1,
371 .tunnel_id_mask = 0xFFFFFFFF,
376 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
378 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
379 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
381 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
382 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
384 uint16_t nb_tx_queue_stats_mappings = 0;
385 uint16_t nb_rx_queue_stats_mappings = 0;
387 unsigned int num_sockets = 0;
388 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
390 #ifdef RTE_LIBRTE_BITRATE
391 /* Bitrate statistics */
392 struct rte_stats_bitrates *bitrate_data;
393 lcoreid_t bitrate_lcore_id;
394 uint8_t bitrate_enabled;
397 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
398 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
400 /* Forward function declarations */
401 static void map_port_queue_stats_mapping_registers(portid_t pi,
402 struct rte_port *port);
403 static void check_all_ports_link_status(uint32_t port_mask);
404 static int eth_event_callback(portid_t port_id,
405 enum rte_eth_event_type type,
406 void *param, void *ret_param);
409 * Check if all the ports are started.
410 * If yes, return positive value. If not, return zero.
412 static int all_ports_started(void);
414 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
415 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
418 * Helper function to check if socket is already discovered.
419 * If already discovered, return zero. If new, return a positive value.
422 new_socket_id(unsigned int socket_id)
426 for (i = 0; i < num_sockets; i++) {
427 if (socket_ids[i] == socket_id)
434 * Setup default configuration.
437 set_default_fwd_lcores_config(void)
441 unsigned int sock_num;
444 for (i = 0; i < RTE_MAX_LCORE; i++) {
445 sock_num = rte_lcore_to_socket_id(i);
446 if (new_socket_id(sock_num)) {
447 if (num_sockets >= RTE_MAX_NUMA_NODES) {
448 rte_exit(EXIT_FAILURE,
449 "Total sockets greater than %u\n",
452 socket_ids[num_sockets++] = sock_num;
454 if (!rte_lcore_is_enabled(i))
456 if (i == rte_get_master_lcore())
458 fwd_lcores_cpuids[nb_lc++] = i;
460 nb_lcores = (lcoreid_t) nb_lc;
461 nb_cfg_lcores = nb_lcores;
466 set_def_peer_eth_addrs(void)
470 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
471 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
472 peer_eth_addrs[i].addr_bytes[5] = i;
477 set_default_fwd_ports_config(void)
482 RTE_ETH_FOREACH_DEV(pt_id)
483 fwd_ports_ids[i++] = pt_id;
485 nb_cfg_ports = nb_ports;
486 nb_fwd_ports = nb_ports;
/*
 * Apply the full default forwarding configuration: lcores, peer Ethernet
 * addresses, and ports. Public entry point (also used by option parsing).
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
498 * Configuration initialisation done once at init time.
/*
 * Create the mbuf pool used for packet buffers on @socket_id, either as an
 * anonymously-mapped pool or via the standard rte_pktmbuf_pool_create()
 * wrapper, and abort on failure.
 *
 * NOTE(review): this extract is missing several original lines (return
 * type, braces, the mp_anon branch condition, goto labels and the final
 * return); comments only -- do not treat the text below as compilable.
 */
501 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
502 unsigned int socket_id)
504 char pool_name[RTE_MEMPOOL_NAMESIZE];
505 struct rte_mempool *rte_mp = NULL;
/* Total element size: mbuf header plus the data room requested. */
508 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
509 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
512 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
513 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
/* Anonymous-memory path: build an empty pool, then populate it. */
516 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
517 mb_size, (unsigned) mb_mempool_cache,
518 sizeof(struct rte_pktmbuf_pool_private),
/* rte_mempool_populate_anon() returns 0 on failure here: free and bail. */
523 if (rte_mempool_populate_anon(rte_mp) == 0) {
524 rte_mempool_free(rte_mp);
528 rte_pktmbuf_pool_init(rte_mp, NULL);
529 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
531 /* wrapper to rte_mempool_create() */
532 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
533 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
/* No pool means testpmd cannot run at all: exit rather than limp on. */
537 if (rte_mp == NULL) {
538 rte_exit(EXIT_FAILURE,
539 "Creation of mbuf pool for socket %u failed: %s\n",
540 socket_id, rte_strerror(rte_errno));
541 } else if (verbose_level > 0) {
542 rte_mempool_dump(stdout, rte_mp);
547 * Check given socket id is valid or not with NUMA mode,
548 * if valid, return 0, else return -1
/*
 * NOTE(review): the return type, braces and the tail of this function
 * (the warning_once update and return statements) are missing from this
 * extract; comments only.
 */
551 check_socket_id(const unsigned int socket_id)
/* Warn about unknown sockets only once per run. */
553 static int warning_once = 0;
/* new_socket_id() is non-zero when the socket was never discovered. */
555 if (new_socket_id(socket_id)) {
556 if (!warning_once && numa_support)
557 printf("Warning: NUMA should be configured manually by"
558 " using --port-numa-config and"
559 " --ring-numa-config parameters along with"
/*
 * NOTE(review): this span is presumably the body of init_config() -- the
 * signature and many interior lines (loop braces, else branches, returns)
 * are missing from this extract; TODO confirm against the upstream file.
 * One-time initialization: per-lcore contexts, per-port dev_info, mbuf
 * pools, GSO contexts, forwarding streams and GRO contexts.
 */
571 struct rte_port *port;
572 struct rte_mempool *mbp;
573 unsigned int nb_mbuf_per_pool;
575 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
576 struct rte_gro_param gro_param;
579 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
/* NUMA_NO_CONFIG means "not set on the command line". */
582 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
583 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
584 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
587 /* Configuration of logical cores. */
588 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
589 sizeof(struct fwd_lcore *) * nb_lcores,
590 RTE_CACHE_LINE_SIZE);
591 if (fwd_lcores == NULL) {
592 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
593 "failed\n", nb_lcores);
595 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
596 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
597 sizeof(struct fwd_lcore),
598 RTE_CACHE_LINE_SIZE);
599 if (fwd_lcores[lc_id] == NULL) {
600 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
603 fwd_lcores[lc_id]->cpuid_idx = lc_id;
/* Per-port: cache dev_info and count ports per NUMA socket. */
606 RTE_ETH_FOREACH_DEV(pid) {
608 rte_eth_dev_info_get(pid, &port->dev_info);
611 if (port_numa[pid] != NUMA_NO_CONFIG)
612 port_per_socket[port_numa[pid]]++;
614 uint32_t socket_id = rte_eth_dev_socket_id(pid);
616 /* if socket_id is invalid, set to 0 */
617 if (check_socket_id(socket_id) < 0)
619 port_per_socket[socket_id]++;
623 /* set flag to initialize port/queue */
624 port->need_reconfig = 1;
625 port->need_reconfig_queues = 1;
629 * Create pools of mbuf.
630 * If NUMA support is disabled, create a single pool of mbuf in
631 * socket 0 memory by default.
632 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
634 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
635 * nb_txd can be configured at run time.
637 if (param_total_num_mbufs)
638 nb_mbuf_per_pool = param_total_num_mbufs;
/* Default sizing: worst-case descriptors + caches + one burst, per port. */
640 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
641 (nb_lcores * mb_mempool_cache) +
642 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
643 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
/* NUMA path: one pool per discovered socket; UMA path: one pool. */
649 for (i = 0; i < num_sockets; i++)
650 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
653 if (socket_num == UMA_NO_CONFIG)
654 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
656 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
662 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
663 DEV_TX_OFFLOAD_GRE_TNL_TSO;
665 * Records which Mbuf pool to use by each logical core, if needed.
667 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
668 mbp = mbuf_pool_find(
669 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
/* Fall back to the socket-0 pool when the lcore's socket has none. */
672 mbp = mbuf_pool_find(0);
673 fwd_lcores[lc_id]->mbp = mbp;
674 /* initialize GSO context */
675 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
676 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
677 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
678 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
680 fwd_lcores[lc_id]->gso_ctx.flag = 0;
683 /* Configuration of packet forwarding streams. */
684 if (init_fwd_streams() < 0)
685 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
689 /* create a gro context for each lcore */
690 gro_param.gro_types = RTE_GRO_TCP_IPV4;
691 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
692 gro_param.max_item_per_flow = MAX_PKT_BURST;
693 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
694 gro_param.socket_id = rte_lcore_to_socket_id(
695 fwd_lcores_cpuids[lc_id]);
696 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
697 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
698 rte_exit(EXIT_FAILURE,
699 "rte_gro_ctx_create() failed\n");
706 reconfig(portid_t new_port_id, unsigned socket_id)
708 struct rte_port *port;
710 /* Reconfiguration of Ethernet ports. */
711 port = &ports[new_port_id];
712 rte_eth_dev_info_get(new_port_id, &port->dev_info);
714 /* set flag to initialize port/queue */
715 port->need_reconfig = 1;
716 port->need_reconfig_queues = 1;
717 port->socket_id = socket_id;
/*
 * (Re)allocate the array of forwarding streams: validate nb_rxq/nb_txq
 * against each port's limits, pick each port's NUMA socket, then free any
 * previous stream array and zmalloc a new one sized nb_ports * max(rxq,txq).
 *
 * NOTE(review): the return type, braces, return statements and several
 * interior lines are missing from this extract; comments only.
 */
724 init_fwd_streams(void)
727 struct rte_port *port;
728 streamid_t sm_id, nb_fwd_streams_new;
731 /* set socket id according to numa or not */
732 RTE_ETH_FOREACH_DEV(pid) {
/* Reject configurations exceeding the device's queue limits. */
734 if (nb_rxq > port->dev_info.max_rx_queues) {
735 printf("Fail: nb_rxq(%d) is greater than "
736 "max_rx_queues(%d)\n", nb_rxq,
737 port->dev_info.max_rx_queues);
740 if (nb_txq > port->dev_info.max_tx_queues) {
741 printf("Fail: nb_txq(%d) is greater than "
742 "max_tx_queues(%d)\n", nb_txq,
743 port->dev_info.max_tx_queues);
/* Command-line --port-numa-config wins; otherwise ask the device. */
747 if (port_numa[pid] != NUMA_NO_CONFIG)
748 port->socket_id = port_numa[pid];
750 port->socket_id = rte_eth_dev_socket_id(pid);
752 /* if socket_id is invalid, set to 0 */
753 if (check_socket_id(port->socket_id) < 0)
758 if (socket_num == UMA_NO_CONFIG)
761 port->socket_id = socket_num;
/* One stream per queue, for whichever direction has more queues. */
765 q = RTE_MAX(nb_rxq, nb_txq);
767 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
770 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
/* Nothing to do when the stream count is unchanged. */
771 if (nb_fwd_streams_new == nb_fwd_streams)
774 if (fwd_streams != NULL) {
775 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
776 if (fwd_streams[sm_id] == NULL)
778 rte_free(fwd_streams[sm_id]);
779 fwd_streams[sm_id] = NULL;
781 rte_free(fwd_streams);
786 nb_fwd_streams = nb_fwd_streams_new;
787 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
788 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
789 if (fwd_streams == NULL)
790 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
791 "failed\n", nb_fwd_streams);
793 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
794 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
795 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
796 if (fwd_streams[sm_id] == NULL)
797 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
804 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a one-line summary of the burst-size distribution in @pbs: the
 * share of the two most frequent burst sizes, lumping the rest as
 * "others". Compiled only with RTE_TEST_PMD_RECORD_BURST_STATS.
 *
 * NOTE(review): the return type, braces, returns and the second-place
 * bookkeeping branch are missing from this extract; comments only.
 */
806 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
808 unsigned int total_burst;
809 unsigned int nb_burst;
810 unsigned int burst_stats[3];
811 uint16_t pktnb_stats[3];
813 int burst_percent[3];
816 * First compute the total number of packet bursts and the
817 * two highest numbers of bursts of the same number of packets.
820 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
821 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
822 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
823 nb_burst = pbs->pkt_burst_spread[nb_pkt];
826 total_burst += nb_burst;
/* New leader: demote the previous top entry to second place. */
827 if (nb_burst > burst_stats[0]) {
828 burst_stats[1] = burst_stats[0];
829 pktnb_stats[1] = pktnb_stats[0];
830 burst_stats[0] = nb_burst;
831 pktnb_stats[0] = nb_pkt;
/* Nothing recorded: nothing to print. */
834 if (total_burst == 0)
836 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
837 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
838 burst_percent[0], (int) pktnb_stats[0]);
839 if (burst_stats[0] == total_burst) {
843 if (burst_stats[0] + burst_stats[1] == total_burst) {
844 printf(" + %d%% of %d pkts]\n",
845 100 - burst_percent[0], pktnb_stats[1]);
848 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
849 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
850 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
851 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
854 printf(" + %d%% of %d pkts + %d%% of others]\n",
855 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
857 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print the forwarding statistics of one port: RX/TX packet and drop
 * counters, optional checksum-error counters (csum engine only), optional
 * burst-size spreads, and per-queue-stats-register counters when queue
 * stats mapping is enabled.
 *
 * NOTE(review): return type, braces, else branches and some format-string
 * continuation lines are missing from this extract; comments only.
 */
860 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
862 struct rte_port *port;
865 static const char *fwd_stats_border = "----------------------";
867 port = &ports[port_id];
868 printf("\n  %s Forward statistics for port %-2d %s\n",
869 fwd_stats_border, port_id, fwd_stats_border);
/* Two layouts: plain counters vs. the queue-stats-mapping variant. */
871 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
872 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
874 stats->ipackets, stats->imissed,
875 (uint64_t) (stats->ipackets + stats->imissed));
877 if (cur_fwd_eng == &csum_fwd_engine)
878 printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
879 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
880 if ((stats->ierrors + stats->rx_nombuf) > 0) {
881 printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
882 printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
885 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
887 stats->opackets, port->tx_dropped,
888 (uint64_t) (stats->opackets + port->tx_dropped));
891 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
893 stats->ipackets, stats->imissed,
894 (uint64_t) (stats->ipackets + stats->imissed));
896 if (cur_fwd_eng == &csum_fwd_engine)
897 printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
898 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
899 if ((stats->ierrors + stats->rx_nombuf) > 0) {
900 printf("  RX-error:%"PRIu64"\n", stats->ierrors);
901 printf("  RX-nombufs:             %14"PRIu64"\n",
905 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
907 stats->opackets, port->tx_dropped,
908 (uint64_t) (stats->opackets + port->tx_dropped));
911 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
913 pkt_burst_stats_display("RX",
914 &port->rx_stream->rx_burst_stats);
916 pkt_burst_stats_display("TX",
917 &port->tx_stream->tx_burst_stats);
/* Per-stats-register dumps when queue stats mapping is enabled. */
920 if (port->rx_queue_stats_mapping_enabled) {
922 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
923 printf("  Stats reg %2d RX-packets:%14"PRIu64
924 "     RX-errors:%14"PRIu64
925 "    RX-bytes:%14"PRIu64"\n",
926 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
930 if (port->tx_queue_stats_mapping_enabled) {
931 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
932 printf("  Stats reg %2d TX-packets:%14"PRIu64
933 "                                 TX-bytes:%14"PRIu64"\n",
934 i, stats->q_opackets[i], stats->q_obytes[i]);
938 printf("  %s--------------------------------%s\n",
939 fwd_stats_border, fwd_stats_border);
/*
 * Print the statistics of one forwarding stream (RX port/queue -> TX
 * port/queue): packet/drop counters, checksum errors for the csum engine,
 * and optionally the burst-size spreads.
 *
 * NOTE(review): return type, braces and the early-return body are missing
 * from this extract; comments only.
 */
943 fwd_stream_stats_display(streamid_t stream_id)
945 struct fwd_stream *fs;
946 static const char *fwd_top_stats_border = "-------";
948 fs = fwd_streams[stream_id];
/* Skip entirely idle streams to keep the output compact. */
949 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
950 (fs->fwd_dropped == 0))
952 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
953 "TX Port=%2d/Queue=%2d %s\n",
954 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
955 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
956 printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
957 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
959 /* if checksum mode */
960 if (cur_fwd_eng == &csum_fwd_engine) {
961 printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
962 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
965 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
966 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
967 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain stale packets from every RX queue of every forwarding port before
 * forwarding starts: two passes, each receiving and freeing bursts until a
 * queue is empty or a one-second timer expires, with a 10 ms pause between
 * passes.
 *
 * NOTE(review): return type, braces, the do-loop opening and several local
 * declarations are missing from this extract; comments only.
 */
972 flush_fwd_rx_queues(void)
974 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
981 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
982 uint64_t timer_period;
984 /* convert to number of cycles */
985 timer_period = rte_get_timer_hz(); /* 1 second timeout */
987 for (j = 0; j < 2; j++) {
988 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
989 for (rxq = 0; rxq < nb_rxq; rxq++) {
990 port_id = fwd_ports_ids[rxp];
992 * testpmd can stuck in the below do while loop
993 * if rte_eth_rx_burst() always returns nonzero
994 * packets. So timer is added to exit this loop
995 * after 1sec timer expiry.
997 prev_tsc = rte_rdtsc();
999 nb_rx = rte_eth_rx_burst(port_id, rxq,
1000 pkts_burst, MAX_PKT_BURST);
1001 for (i = 0; i < nb_rx; i++)
1002 rte_pktmbuf_free(pkts_burst[i]);
1004 cur_tsc = rte_rdtsc();
1005 diff_tsc = cur_tsc - prev_tsc;
1006 timer_tsc += diff_tsc;
1007 } while ((nb_rx > 0) &&
1008 (timer_tsc < timer_period));
1012 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main per-lcore forwarding loop: repeatedly run @pkt_fwd on every stream
 * assigned to forwarding context @fc until fc->stopped is set. Optionally
 * performs periodic bitrate calculation and latency-stats updates on the
 * designated lcores.
 *
 * NOTE(review): return type, braces, the do-loop opening and the bitrate
 * for-loop header are missing from this extract; comments only.
 */
1017 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1019 struct fwd_stream **fsm;
1022 #ifdef RTE_LIBRTE_BITRATE
1023 uint64_t tics_per_1sec;
1024 uint64_t tics_datum;
1025 uint64_t tics_current;
1026 uint8_t idx_port, cnt_ports;
1028 cnt_ports = rte_eth_dev_count();
1029 tics_datum = rte_rdtsc();
1030 tics_per_1sec = rte_get_timer_hz();
/* The slice of fwd_streams[] owned by this forwarding lcore. */
1032 fsm = &fwd_streams[fc->stream_idx];
1033 nb_fs = fc->stream_nb;
1035 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1036 (*pkt_fwd)(fsm[sm_id]);
1037 #ifdef RTE_LIBRTE_BITRATE
/* Only the lcore chosen for bitrate stats does the periodic calc. */
1038 if (bitrate_enabled != 0 &&
1039 bitrate_lcore_id == rte_lcore_id()) {
1040 tics_current = rte_rdtsc();
1041 if (tics_current - tics_datum >= tics_per_1sec) {
1042 /* Periodic bitrate calculation */
1044 idx_port < cnt_ports;
1046 rte_stats_bitrate_calc(bitrate_data,
1048 tics_datum = tics_current;
1052 #ifdef RTE_LIBRTE_LATENCY_STATS
1053 if (latencystats_enabled != 0 &&
1054 latencystats_lcore_id == rte_lcore_id())
1055 rte_latencystats_update();
1058 } while (! fc->stopped);
1062 start_pkt_forward_on_core(void *fwd_arg)
1064 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1065 cur_fwd_config.fwd_eng->packet_fwd);
1070 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1071 * Used to start communication flows in network loopback test configurations.
1074 run_one_txonly_burst_on_core(void *fwd_arg)
1076 struct fwd_lcore *fwd_lc;
1077 struct fwd_lcore tmp_lcore;
1079 fwd_lc = (struct fwd_lcore *) fwd_arg;
1080 tmp_lcore = *fwd_lc;
1081 tmp_lcore.stopped = 1;
1082 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1087 * Launch packet forwarding:
1088 * - Setup per-port forwarding context.
1089 * - launch logical cores with their forwarding configuration.
1092 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1094 port_fwd_begin_t port_fwd_begin;
1099 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1100 if (port_fwd_begin != NULL) {
1101 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1102 (*port_fwd_begin)(fwd_ports_ids[i]);
1104 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1105 lc_id = fwd_lcores_cpuids[i];
1106 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1107 fwd_lcores[i]->stopped = 0;
1108 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1109 fwd_lcores[i], lc_id);
1111 printf("launch lcore %u failed - diag=%d\n",
1118 * Launch packet forwarding configuration.
/*
 * Start packet forwarding with the currently selected engine: validate the
 * rxq/txq configuration against the engine, check all ports are started and
 * forwarding is not already running, (re)build the streams, flush stale RX
 * packets, snapshot per-port stats, reset per-stream counters, optionally
 * run @with_tx_first TXONLY bursts first, then launch the forwarding loops.
 *
 * NOTE(review): return type, braces, returns, the DCB guard condition and
 * several interior lines are missing from this extract; comments only.
 */
1121 start_packet_forwarding(int with_tx_first)
1123 port_fwd_begin_t port_fwd_begin;
1124 port_fwd_end_t port_fwd_end;
1125 struct rte_port *port;
/* Engine/queue sanity checks: each engine needs its direction's queues. */
1130 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1131 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1133 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1134 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1136 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1137 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1138 (!nb_rxq || !nb_txq))
1139 rte_exit(EXIT_FAILURE,
1140 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1141 cur_fwd_eng->fwd_mode_name);
1143 if (all_ports_started() == 0) {
1144 printf("Not all ports were started\n");
1147 if (test_done == 0) {
1148 printf("Packet forwarding already started\n");
1152 if (init_fwd_streams() < 0) {
1153 printf("Fail from init_fwd_streams()\n");
/* DCB consistency: all forwarding ports must be in DCB mode, and more
 * than one forwarding core is required. */
1158 for (i = 0; i < nb_fwd_ports; i++) {
1159 pt_id = fwd_ports_ids[i];
1160 port = &ports[pt_id];
1161 if (!port->dcb_flag) {
1162 printf("In DCB mode, all forwarding ports must "
1163 "be configured in this mode.\n");
1167 if (nb_fwd_lcores == 1) {
1168 printf("In DCB mode,the nb forwarding cores "
1169 "should be larger than 1.\n");
1176 flush_fwd_rx_queues();
1179 pkt_fwd_config_display(&cur_fwd_config);
1180 rxtx_config_display();
/* Snapshot stats so the final report shows only this run's deltas. */
1182 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1183 pt_id = fwd_ports_ids[i];
1184 port = &ports[pt_id];
1185 rte_eth_stats_get(pt_id, &port->stats);
1186 port->tx_dropped = 0;
1188 map_port_queue_stats_mapping_registers(pt_id, port);
1190 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1191 fwd_streams[sm_id]->rx_packets = 0;
1192 fwd_streams[sm_id]->tx_packets = 0;
1193 fwd_streams[sm_id]->fwd_dropped = 0;
1194 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1195 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1197 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1198 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1199 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1200 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1201 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1203 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1204 fwd_streams[sm_id]->core_cycles = 0;
/* Optional warm-up: run the TXONLY engine for a few single bursts. */
1207 if (with_tx_first) {
1208 port_fwd_begin = tx_only_engine.port_fwd_begin;
1209 if (port_fwd_begin != NULL) {
1210 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1211 (*port_fwd_begin)(fwd_ports_ids[i]);
1213 while (with_tx_first--) {
1214 launch_packet_forwarding(
1215 run_one_txonly_burst_on_core);
1216 rte_eal_mp_wait_lcore();
1218 port_fwd_end = tx_only_engine.port_fwd_end;
1219 if (port_fwd_end != NULL) {
1220 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1221 (*port_fwd_end)(fwd_ports_ids[i]);
1224 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop the current packet-forwarding run: tell every forwarding lcore to
 * stop and wait for it, run the forwarding engine's per-port end hook,
 * fold per-stream drop/bad-checksum counters into the owning ports, then
 * print per-port and accumulated statistics.
 *
 * NOTE(review): this listing has gaps in its embedded line numbers —
 * braces and statements between numbered lines are elided in this excerpt.
 */
1228 stop_packet_forwarding(void)
1230 struct rte_eth_stats stats;
1231 struct rte_port *port;
1232 port_fwd_end_t port_fwd_end;
1237 uint64_t total_recv;
1238 uint64_t total_xmit;
1239 uint64_t total_rx_dropped;
1240 uint64_t total_tx_dropped;
1241 uint64_t total_rx_nombuf;
1242 uint64_t tx_dropped;
1243 uint64_t rx_bad_ip_csum;
1244 uint64_t rx_bad_l4_csum;
1245 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1246 uint64_t fwd_cycles;
1249 static const char *acc_stats_border = "+++++++++++++++";
1252 printf("Packet forwarding not started\n");
/* Signal all forwarding lcores to stop, then block until they all exit. */
1255 printf("Telling cores to stop...");
1256 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1257 fwd_lcores[lc_id]->stopped = 1;
1258 printf("\nWaiting for lcores to finish...\n");
1259 rte_eal_mp_wait_lcore();
/* Run the engine's optional per-port teardown hook on each fwd port. */
1260 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1261 if (port_fwd_end != NULL) {
1262 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1263 pt_id = fwd_ports_ids[i];
1264 (*port_fwd_end)(pt_id);
1267 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Fold each stream's counters into the ports that own its rx/tx sides. */
1270 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1271 if (cur_fwd_config.nb_fwd_streams >
1272 cur_fwd_config.nb_fwd_ports) {
1273 fwd_stream_stats_display(sm_id);
1274 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1275 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1277 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1279 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1282 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1283 tx_dropped = (uint64_t) (tx_dropped +
1284 fwd_streams[sm_id]->fwd_dropped);
1285 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1288 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1289 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1290 fwd_streams[sm_id]->rx_bad_ip_csum);
1291 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1295 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1296 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1297 fwd_streams[sm_id]->rx_bad_l4_csum);
1298 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1301 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1302 fwd_cycles = (uint64_t) (fwd_cycles +
1303 fwd_streams[sm_id]->core_cycles);
/*
 * Per-port pass: subtract the snapshot taken at start time so the
 * displayed stats cover only this forwarding run, and accumulate totals.
 */
1308 total_rx_dropped = 0;
1309 total_tx_dropped = 0;
1310 total_rx_nombuf = 0;
1311 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1312 pt_id = fwd_ports_ids[i];
1314 port = &ports[pt_id];
1315 rte_eth_stats_get(pt_id, &stats);
1316 stats.ipackets -= port->stats.ipackets;
1317 port->stats.ipackets = 0;
1318 stats.opackets -= port->stats.opackets;
1319 port->stats.opackets = 0;
1320 stats.ibytes -= port->stats.ibytes;
1321 port->stats.ibytes = 0;
1322 stats.obytes -= port->stats.obytes;
1323 port->stats.obytes = 0;
1324 stats.imissed -= port->stats.imissed;
1325 port->stats.imissed = 0;
1326 stats.oerrors -= port->stats.oerrors;
1327 port->stats.oerrors = 0;
1328 stats.rx_nombuf -= port->stats.rx_nombuf;
1329 port->stats.rx_nombuf = 0;
1331 total_recv += stats.ipackets;
1332 total_xmit += stats.opackets;
1333 total_rx_dropped += stats.imissed;
1334 total_tx_dropped += port->tx_dropped;
1335 total_rx_nombuf += stats.rx_nombuf;
1337 fwd_port_stats_display(pt_id, &stats);
/* Print the accumulated totals over all forwarding ports. */
1340 printf("\n %s Accumulated forward statistics for all ports"
1342 acc_stats_border, acc_stats_border);
1343 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1345 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1347 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1348 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1349 if (total_rx_nombuf > 0)
1350 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1351 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1353 acc_stats_border, acc_stats_border);
1354 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* NOTE(review): division below assumes total_recv != 0 on this path. */
1356 printf("\n CPU cycles/packet=%u (total cycles="
1357 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1358 (unsigned int)(fwd_cycles / total_recv),
1359 fwd_cycles, total_recv);
1361 printf("\nDone.\n");
/* Bring the link of port @pid up via the ethdev API; print on failure. */
1366 dev_set_link_up(portid_t pid)
1368 if (rte_eth_dev_set_link_up(pid) < 0)
1369 printf("\nSet link up fail.\n");
/* Bring the link of port @pid down via the ethdev API; print on failure. */
1373 dev_set_link_down(portid_t pid)
1375 if (rte_eth_dev_set_link_down(pid) < 0)
1376 printf("\nSet link down fail.\n");
/*
 * Check every ethdev port: ports that are bonding slaves (slave_flag set)
 * are ignored. Returns a falsy value if any non-slave port is not in
 * RTE_PORT_STARTED state (return statements elided in this excerpt).
 */
1380 all_ports_started(void)
1383 struct rte_port *port;
1385 RTE_ETH_FOREACH_DEV(pi) {
1387 /* Check if there is a port which is not started */
1388 if ((port->port_status != RTE_PORT_STARTED) &&
1389 (port->slave_flag == 0))
1393 /* No port is not started */
/*
 * Counterpart of all_ports_started(): ignores bonding-slave ports and
 * checks that every remaining port is in RTE_PORT_STOPPED state.
 */
1398 all_ports_stopped(void)
1401 struct rte_port *port;
1403 RTE_ETH_FOREACH_DEV(pi) {
1405 if ((port->port_status != RTE_PORT_STOPPED) &&
1406 (port->slave_flag == 0))
/* Validate @port_id (warning enabled) and test for RTE_PORT_STARTED. */
1414 port_is_started(portid_t port_id)
1416 if (port_id_is_invalid(port_id, ENABLED_WARN))
1419 if (ports[port_id].port_status != RTE_PORT_STARTED)
/* Validate @port_id (warning enabled) and test for RTE_PORT_CLOSED. */
1426 port_is_closed(portid_t port_id)
1428 if (port_id_is_invalid(port_id, ENABLED_WARN))
1431 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * Start port @pid, or every port when pid == RTE_PORT_ALL.
 *
 * For each selected port that is currently stopped: (re)configure the
 * device if flagged, (re)create its rx/tx queues if flagged, register
 * ethdev event callbacks, start the device, and report its MAC address.
 * Port state transitions are guarded with rte_atomic16_cmpset on
 * port->port_status (STOPPED -> HANDLING -> STARTED, rolled back to
 * STOPPED on any failure). Finally triggers a link-status check if at
 * least one port was started.
 *
 * NOTE(review): gaps in the embedded line numbers mean braces / error
 * returns between numbered lines are elided in this excerpt.
 */
1438 start_port(portid_t pid)
1440 int diag, need_check_link_status = -1;
1443 struct rte_port *port;
1444 struct ether_addr mac_addr;
1445 enum rte_eth_event_type event_type;
1447 if (port_id_is_invalid(pid, ENABLED_WARN))
1452 RTE_ETH_FOREACH_DEV(pi) {
1453 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1456 need_check_link_status = 0;
/* Claim the port: only a STOPPED port may be started. */
1458 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1459 RTE_PORT_HANDLING) == 0) {
1460 printf("Port %d is now not stopped\n", pi);
/* Device-level reconfiguration, requested via port->need_reconfig. */
1464 if (port->need_reconfig > 0) {
1465 port->need_reconfig = 0;
1467 if (flow_isolate_all) {
1468 int ret = port_flow_isolate(pi, 1);
1470 printf("Failed to apply isolated"
1471 " mode on port %d\n", pi);
1476 printf("Configuring Port %d (socket %u)\n", pi,
1478 /* configure port */
1479 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1482 if (rte_atomic16_cmpset(&(port->port_status),
1483 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1484 printf("Port %d can not be set back "
1485 "to stopped\n", pi);
1486 printf("Fail to configure port %d\n", pi);
1487 /* try to reconfigure port next time */
1488 port->need_reconfig = 1;
/* Queue-level reconfiguration, requested via need_reconfig_queues. */
1492 if (port->need_reconfig_queues > 0) {
1493 port->need_reconfig_queues = 0;
1494 /* setup tx queues */
1495 for (qi = 0; qi < nb_txq; qi++) {
/* Prefer the per-port NUMA override for the tx ring when configured. */
1496 if ((numa_support) &&
1497 (txring_numa[pi] != NUMA_NO_CONFIG))
1498 diag = rte_eth_tx_queue_setup(pi, qi,
1499 nb_txd,txring_numa[pi],
1502 diag = rte_eth_tx_queue_setup(pi, qi,
1503 nb_txd,port->socket_id,
1509 /* Fail to setup tx queue, return */
1510 if (rte_atomic16_cmpset(&(port->port_status),
1512 RTE_PORT_STOPPED) == 0)
1513 printf("Port %d can not be set back "
1514 "to stopped\n", pi);
1515 printf("Fail to configure port %d tx queues\n", pi);
1516 /* try to reconfigure queues next time */
1517 port->need_reconfig_queues = 1;
1520 /* setup rx queues */
1521 for (qi = 0; qi < nb_rxq; qi++) {
/* Rx queues also need an mbuf pool on the chosen socket. */
1522 if ((numa_support) &&
1523 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1524 struct rte_mempool * mp =
1525 mbuf_pool_find(rxring_numa[pi]);
1527 printf("Failed to setup RX queue:"
1528 "No mempool allocation"
1529 " on the socket %d\n",
1534 diag = rte_eth_rx_queue_setup(pi, qi,
1535 nb_rxd,rxring_numa[pi],
1536 &(port->rx_conf),mp);
1538 struct rte_mempool *mp =
1539 mbuf_pool_find(port->socket_id);
1541 printf("Failed to setup RX queue:"
1542 "No mempool allocation"
1543 " on the socket %d\n",
1547 diag = rte_eth_rx_queue_setup(pi, qi,
1548 nb_rxd,port->socket_id,
1549 &(port->rx_conf), mp);
1554 /* Fail to setup rx queue, return */
1555 if (rte_atomic16_cmpset(&(port->port_status),
1557 RTE_PORT_STOPPED) == 0)
1558 printf("Port %d can not be set back "
1559 "to stopped\n", pi);
1560 printf("Fail to configure port %d rx queues\n", pi);
1561 /* try to reconfigure queues next time */
1562 port->need_reconfig_queues = 1;
/* Register a callback for every ethdev event type on this port. */
1567 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1568 event_type < RTE_ETH_EVENT_MAX;
1570 diag = rte_eth_dev_callback_register(pi,
1575 printf("Failed to setup even callback for event %d\n",
/* start port */
1582 if (rte_eth_dev_start(pi) < 0) {
1583 printf("Fail to start port %d\n", pi);
1585 /* Fail to setup rx queue, return */
1586 if (rte_atomic16_cmpset(&(port->port_status),
1587 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1588 printf("Port %d can not be set back to "
/* Transition the port into STARTED and report its MAC address. */
1593 if (rte_atomic16_cmpset(&(port->port_status),
1594 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1595 printf("Port %d can not be set into started\n", pi);
1597 rte_eth_macaddr_get(pi, &mac_addr);
1598 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1599 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1600 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1601 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1603 /* at least one port started, need checking link status */
1604 need_check_link_status = 1;
1607 if (need_check_link_status == 1 && !no_link_check)
1608 check_all_ports_link_status(RTE_PORT_ALL);
1609 else if (need_check_link_status == 0)
1610 printf("Please stop the ports first\n");
/*
 * Stop port @pid (or all ports for RTE_PORT_ALL). Ports still referenced
 * by the forwarding configuration or acting as bonding slaves are
 * refused. State is moved STARTED -> HANDLING -> STOPPED via atomic
 * compare-and-set; a link-status check runs if any port was stopped.
 */
1617 stop_port(portid_t pid)
1620 struct rte_port *port;
1621 int need_check_link_status = 0;
1628 if (port_id_is_invalid(pid, ENABLED_WARN))
1631 printf("Stopping ports...\n");
1633 RTE_ETH_FOREACH_DEV(pi) {
1634 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
/* Refuse to stop a port still in use by an active forwarding run. */
1637 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1638 printf("Please remove port %d from forwarding configuration.\n", pi);
1642 if (port_is_bonding_slave(pi)) {
1643 printf("Please remove port %d from bonded device.\n", pi);
1648 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1649 RTE_PORT_HANDLING) == 0)
1652 rte_eth_dev_stop(pi);
1654 if (rte_atomic16_cmpset(&(port->port_status),
1655 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1656 printf("Port %d can not be set into stopped\n", pi);
1657 need_check_link_status = 1;
1659 if (need_check_link_status && !no_link_check)
1660 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Close port @pid (or all ports for RTE_PORT_ALL): flush its flow rules
 * if any, then rte_eth_dev_close(). Ports in use by forwarding or acting
 * as bonding slaves are refused; already-closed ports are skipped.
 * State moves STOPPED -> HANDLING -> CLOSED via atomic compare-and-set.
 */
1666 close_port(portid_t pid)
1669 struct rte_port *port;
1671 if (port_id_is_invalid(pid, ENABLED_WARN))
1674 printf("Closing ports...\n");
1676 RTE_ETH_FOREACH_DEV(pi) {
1677 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1680 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1681 printf("Please remove port %d from forwarding configuration.\n", pi);
1685 if (port_is_bonding_slave(pi)) {
1686 printf("Please remove port %d from bonded device.\n", pi);
/* CLOSED -> CLOSED cmpset succeeding means it is already closed. */
1691 if (rte_atomic16_cmpset(&(port->port_status),
1692 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1693 printf("Port %d is already closed\n", pi);
1697 if (rte_atomic16_cmpset(&(port->port_status),
1698 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1699 printf("Port %d is now not stopped\n", pi);
/* Remove rte_flow rules before closing the device. */
1703 if (port->flow_list)
1704 port_flow_flush(pi);
1705 rte_eth_dev_close(pi);
1707 if (rte_atomic16_cmpset(&(port->port_status),
1708 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1709 printf("Port %d cannot be set to closed\n", pi);
/*
 * Reset port @pid (or all ports for RTE_PORT_ALL) via rte_eth_dev_reset().
 * On success the port is flagged for full reconfiguration (device and
 * queues) at the next start; failures are reported with the diag code.
 * Ports in use by forwarding or acting as bonding slaves are refused.
 */
1716 reset_port(portid_t pid)
1720 struct rte_port *port;
1722 if (port_id_is_invalid(pid, ENABLED_WARN))
1725 printf("Resetting ports...\n");
1727 RTE_ETH_FOREACH_DEV(pi) {
1728 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1731 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1732 printf("Please remove port %d from forwarding "
1733 "configuration.\n", pi);
1737 if (port_is_bonding_slave(pi)) {
1738 printf("Please remove port %d from bonded device.\n",
1743 diag = rte_eth_dev_reset(pi);
/* A reset invalidates the previous config: force reconfig on restart. */
1746 port->need_reconfig = 1;
1747 port->need_reconfig_queues = 1;
1749 printf("Failed to reset port %d. diag=%d\n", pi, diag);
/*
 * Hot-plug a new port described by @identifier (device string). On
 * success: determine its socket, reconfigure testpmd for the new port,
 * enable promiscuous mode, refresh nb_ports and mark the port STOPPED.
 */
1757 attach_port(char *identifier)
1760 unsigned int socket_id;
1762 printf("Attaching a new port...\n");
1764 if (identifier == NULL) {
1765 printf("Invalid parameters are specified\n");
1769 if (rte_eth_dev_attach(identifier, &pi))
1772 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1773 /* if socket_id is invalid, set to 0 */
1774 if (check_socket_id(socket_id) < 0)
1776 reconfig(pi, socket_id);
1777 rte_eth_promiscuous_enable(pi);
1779 nb_ports = rte_eth_dev_count();
1781 ports[pi].port_status = RTE_PORT_STOPPED;
1783 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Hot-unplug port @port_id. The port must already be closed; its flow
 * rules are flushed first, then rte_eth_dev_detach() removes the device
 * and nb_ports is refreshed.
 */
1788 detach_port(portid_t port_id)
1790 char name[RTE_ETH_NAME_MAX_LEN];
1792 printf("Detaching a port...\n");
1794 if (!port_is_closed(port_id)) {
1795 printf("Please close port first\n");
1799 if (ports[port_id].flow_list)
1800 port_flow_flush(port_id);
1802 if (rte_eth_dev_detach(port_id, name)) {
1803 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1807 nb_ports = rte_eth_dev_count();
1809 printf("Port '%s' is detached. Now total ports is %d\n",
1821 stop_packet_forwarding();
1823 if (ports != NULL) {
1825 RTE_ETH_FOREACH_DEV(pt_id) {
1826 printf("\nShutting down port %d...\n", pt_id);
1832 printf("\nBye...\n");
/* A named test command: menu name plus the handler invoked for it. */
1835 typedef void (*cmd_func_t)(void);
1836 struct pmd_test_command {
1837 const char *cmd_name;
1838 cmd_func_t cmd_func;
/* Number of entries in the pmd_test_menu command table. */
1841 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1843 /* Check the link status of all ports in up to 9s, and print them finally */
1845 check_all_ports_link_status(uint32_t port_mask)
1847 #define CHECK_INTERVAL 100 /* 100ms */
1848 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1850 uint8_t count, all_ports_up, print_flag = 0;
1851 struct rte_eth_link link;
1853 printf("Checking link statuses...\n");
/* Poll every masked port; stop early once all links are up. */
1855 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1857 RTE_ETH_FOREACH_DEV(portid) {
1858 if ((port_mask & (1 << portid)) == 0)
1860 memset(&link, 0, sizeof(link));
1861 rte_eth_link_get_nowait(portid, &link);
1862 /* print link status if flag set */
1863 if (print_flag == 1) {
1864 if (link.link_status)
1866 "Port%d Link Up. speed %u Mbps- %s\n",
1867 portid, link.link_speed,
1868 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1869 ("full-duplex") : ("half-duplex\n"));
1871 printf("Port %d Link Down\n", portid);
1874 /* clear all_ports_up flag if any link down */
1875 if (link.link_status == ETH_LINK_DOWN) {
1880 /* after finally printing all link status, get out */
1881 if (print_flag == 1)
/* Not all ports up yet: sleep one interval before retrying. */
1884 if (all_ports_up == 0) {
1886 rte_delay_ms(CHECK_INTERVAL)ñ
1889 /* set the print_flag if all ports up or timeout */
1890 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred handler for a device-removal (RMV) event: closes the removed
 * port and detaches its underlying device. @arg carries the port id
 * encoded as an intptr_t (set by eth_event_callback via rte_eal_alarm_set).
 */
1900 rmv_event_callback(void *arg)
1902 struct rte_eth_dev *dev;
1903 portid_t port_id = (intptr_t)arg;
1905 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1906 dev = &rte_eth_devices[port_id];
1909 close_port(port_id);
1910 printf("removing device %s\n", dev->device->name);
1911 if (rte_eal_dev_detach(dev->device))
1912 RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
1916 /* This function is used by the interrupt thread */
/*
 * Generic ethdev event callback: logs the event (subject to
 * event_print_mask) and, for RTE_ETH_EVENT_INTR_RMV, schedules
 * rmv_event_callback via a 100 ms EAL alarm so the removal work runs
 * outside the interrupt thread.
 */
1918 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1921 static const char * const event_desc[] = {
1922 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1923 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1924 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1925 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1926 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1927 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1928 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1929 [RTE_ETH_EVENT_MAX] = NULL,
1932 RTE_SET_USED(param);
1933 RTE_SET_USED(ret_param);
1935 if (type >= RTE_ETH_EVENT_MAX) {
1936 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1937 port_id, __func__, type);
1939 } else if (event_print_mask & (UINT32_C(1) << type)) {
1940 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1946 case RTE_ETH_EVENT_INTR_RMV:
1947 if (rte_eal_alarm_set(100000,
1948 rmv_event_callback, (void *)(intptr_t)port_id))
1949 fprintf(stderr, "Could not set up deferred device removal\n");
/*
 * Apply the user-supplied TX queue -> stats-counter mappings that target
 * @port_id (skipping queue ids >= nb_txq). Marks the port's tx mapping
 * as enabled once a mapping has been applied.
 */
1958 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1962 uint8_t mapping_found = 0;
1964 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1965 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1966 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1967 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1968 tx_queue_stats_mappings[i].queue_id,
1969 tx_queue_stats_mappings[i].stats_counter_id);
1976 port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX counterpart of set_tx_queue_stats_mapping_registers(): apply the
 * user-supplied RX queue -> stats-counter mappings for @port_id.
 */
1981 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1985 uint8_t mapping_found = 0;
1987 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1988 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1989 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1990 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1991 rx_queue_stats_mappings[i].queue_id,
1992 rx_queue_stats_mappings[i].stats_counter_id);
1999 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Program both TX and RX queue-stats mapping registers for port @pi.
 * -ENOTSUP from the driver is tolerated (mapping disabled with a
 * message); any other failure is fatal via rte_exit().
 */
2004 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2008 diag = set_tx_queue_stats_mapping_registers(pi, port);
2010 if (diag == -ENOTSUP) {
2011 port->tx_queue_stats_mapping_enabled = 0;
2012 printf("TX queue stats mapping not supported port id=%d\n", pi);
2015 rte_exit(EXIT_FAILURE,
2016 "set_tx_queue_stats_mapping_registers "
2017 "failed for port id=%d diag=%d\n",
2021 diag = set_rx_queue_stats_mapping_registers(pi, port);
2023 if (diag == -ENOTSUP) {
2024 port->rx_queue_stats_mapping_enabled = 0;
2025 printf("RX queue stats mapping not supported port id=%d\n", pi);
2028 rte_exit(EXIT_FAILURE,
2029 "set_rx_queue_stats_mapping_registers "
2030 "failed for port id=%d diag=%d\n",
/*
 * Initialize a port's rx_conf/tx_conf from the driver defaults, then
 * override each threshold/flag field for which the user supplied a value
 * on the command line (RTE_PMD_PARAM_UNSET means "keep driver default").
 */
2036 rxtx_port_config(struct rte_port *port)
2038 port->rx_conf = port->dev_info.default_rxconf;
2039 port->tx_conf = port->dev_info.default_txconf;
2041 /* Check if any RX/TX parameters have been passed */
2042 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2043 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2045 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2046 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2048 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2049 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2051 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2052 port->rx_conf.rx_free_thresh = rx_free_thresh;
2054 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2055 port->rx_conf.rx_drop_en = rx_drop_en;
2057 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2058 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2060 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2061 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2063 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2064 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2066 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2067 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2069 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2070 port->tx_conf.tx_free_thresh = tx_free_thresh;
2072 if (txq_flags != RTE_PMD_PARAM_UNSET)
2073 port->tx_conf.txq_flags = txq_flags;
/*
 * Build the default configuration of every ethdev port: rx mode, flow
 * director, RSS (enabled when an RSS hash is requested and DCB is off),
 * per-queue thresholds (rxtx_port_config), MAC address, queue-stats
 * mapping, optional ixgbe bypass init, LSC/RMV interrupt flags when the
 * device supports them, and softnic-specific flags.
 */
2077 init_port_config(void)
2080 struct rte_port *port;
2082 RTE_ETH_FOREACH_DEV(pid) {
2084 port->dev_conf.rxmode = rx_mode;
2085 port->dev_conf.fdir_conf = fdir_conf;
/* RSS key left NULL: the driver uses its default key. */
2087 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2088 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2090 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2091 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* Multi-queue RX mode only when DCB is not active on the port. */
2094 if (port->dcb_flag == 0) {
2095 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2096 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2098 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2101 rxtx_port_config(port);
2103 rte_eth_macaddr_get(pid, &port->eth_addr);
2105 map_port_queue_stats_mapping_registers(pid, port);
2106 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2107 rte_pmd_ixgbe_bypass_init(pid);
/* Enable link-state / removal interrupts only if the device flags
 * advertise support for them. */
2110 if (lsc_interrupt &&
2111 (rte_eth_devices[pid].data->dev_flags &
2112 RTE_ETH_DEV_INTR_LSC))
2113 port->dev_conf.intr_conf.lsc = 1;
2114 if (rmv_interrupt &&
2115 (rte_eth_devices[pid].data->dev_flags &
2116 RTE_ETH_DEV_INTR_RMV))
2117 port->dev_conf.intr_conf.rmv = 1;
2119 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2120 /* Detect softnic port */
2121 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2122 port->softnic_enable = 1;
2123 memset(&port->softport, 0, sizeof(struct softnic_port));
2125 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2126 port->softport.tm_flag = 1;
/* Mark port @slave_pid as a bonding slave (excluded from global checks). */
2132 void set_port_slave_flag(portid_t slave_pid)
2134 struct rte_port *port;
2136 port = &ports[slave_pid];
2137 port->slave_flag = 1;
/* Clear the bonding-slave mark on port @slave_pid. */
2140 void clear_port_slave_flag(portid_t slave_pid)
2142 struct rte_port *port;
2144 port = &ports[slave_pid];
2145 port->slave_flag = 0;
/* Return non-zero when port @slave_pid is flagged as a bonding slave. */
2148 uint8_t port_is_bonding_slave(portid_t slave_pid)
2150 struct rte_port *port;
2152 port = &ports[slave_pid];
2153 return port->slave_flag;
/* VLAN ids (0..31) used to populate the VMDQ+DCB pool map in
 * get_eth_dcb_conf() and the VLAN filter table in init_port_dcb_config(). */
2156 const uint16_t vlan_tags[] = {
2157 0, 1, 2, 3, 4, 5, 6, 7,
2158 8, 9, 10, 11, 12, 13, 14, 15,
2159 16, 17, 18, 19, 20, 21, 22, 23,
2160 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill @eth_conf for DCB operation. Two variants: DCB_VT_ENABLED builds
 * a VMDQ+DCB config (pool count derived from the TC count, VLAN tags
 * mapped round-robin onto pools); otherwise a plain DCB(+RSS) config is
 * built with user priorities mapped onto the @num_tcs traffic classes.
 * PFC support is reflected in dcb_capability_en.
 */
2164 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2165 enum dcb_mode_enable dcb_mode,
2166 enum rte_eth_nb_tcs num_tcs,
2172 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2173 * given above, and the number of traffic classes available for use.
2175 if (dcb_mode == DCB_VT_ENABLED) {
2176 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2177 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2178 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2179 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2181 /* VMDQ+DCB RX and TX configurations */
2182 vmdq_rx_conf->enable_default_pool = 0;
2183 vmdq_rx_conf->default_pool = 0;
/* 4 TCs -> 32 pools, otherwise 16 pools (8 TCs). */
2184 vmdq_rx_conf->nb_queue_pools =
2185 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2186 vmdq_tx_conf->nb_queue_pools =
2187 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2189 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2190 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2191 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2192 vmdq_rx_conf->pool_map[i].pools =
2193 1 << (i % vmdq_rx_conf->nb_queue_pools);
2195 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2196 vmdq_rx_conf->dcb_tc[i] = i;
2197 vmdq_tx_conf->dcb_tc[i] = i;
2200 /* set DCB mode of RX and TX of multiple queues */
2201 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2202 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2204 struct rte_eth_dcb_rx_conf *rx_conf =
2205 &eth_conf->rx_adv_conf.dcb_rx_conf;
2206 struct rte_eth_dcb_tx_conf *tx_conf =
2207 &eth_conf->tx_adv_conf.dcb_tx_conf;
2209 rx_conf->nb_tcs = num_tcs;
2210 tx_conf->nb_tcs = num_tcs;
/* Distribute the 8 user priorities evenly over the TCs. */
2212 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2213 rx_conf->dcb_tc[i] = i % num_tcs;
2214 tx_conf->dcb_tc[i] = i % num_tcs;
2216 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2217 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2218 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2222 eth_conf->dcb_capability_en =
2223 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2225 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * Put port @pid into DCB mode: build the DCB config via
 * get_eth_dcb_conf(), apply it with 0 queues (queues are created later
 * at start_port time), derive nb_rxq/nb_txq from the device capabilities
 * and TC count, populate the VLAN filter table with vlan_tags[], and
 * flag the port as DCB-enabled.
 */
2231 init_port_dcb_config(portid_t pid,
2232 enum dcb_mode_enable dcb_mode,
2233 enum rte_eth_nb_tcs num_tcs,
2236 struct rte_eth_conf port_conf;
2237 struct rte_port *rte_port;
2241 rte_port = &ports[pid];
2243 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2244 /* Enter DCB configuration status */
2247 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2248 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2251 port_conf.rxmode.hw_vlan_filter = 1;
2254 * Write the configuration into the device.
2255 * Set the numbers of RX & TX queues to 0, so
2256 * the RX & TX queues will not be setup.
2258 rte_eth_dev_configure(pid, 0, 0, &port_conf);
2260 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2262 /* If dev_info.vmdq_pool_base is greater than 0,
2263 * the queue id of vmdq pools is started after pf queues.
2265 if (dcb_mode == DCB_VT_ENABLED &&
2266 rte_port->dev_info.vmdq_pool_base > 0) {
2267 printf("VMDQ_DCB multi-queue mode is nonsensical"
2268 " for port %d.", pid);
2272 /* Assume the ports in testpmd have the same dcb capability
2273 * and has the same number of rxq and txq in dcb mode
2275 if (dcb_mode == DCB_VT_ENABLED) {
2276 if (rte_port->dev_info.max_vfs > 0) {
2277 nb_rxq = rte_port->dev_info.nb_rx_queues;
2278 nb_txq = rte_port->dev_info.nb_tx_queues;
2280 nb_rxq = rte_port->dev_info.max_rx_queues;
2281 nb_txq = rte_port->dev_info.max_tx_queues;
2284 /*if vt is disabled, use all pf queues */
2285 if (rte_port->dev_info.vmdq_pool_base == 0) {
2286 nb_rxq = rte_port->dev_info.max_rx_queues;
2287 nb_txq = rte_port->dev_info.max_tx_queues;
2289 nb_rxq = (queueid_t)num_tcs;
2290 nb_txq = (queueid_t)num_tcs;
2294 rx_free_thresh = 64;
2296 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2298 rxtx_port_config(rte_port);
2300 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
/* Accept every VLAN id from the vlan_tags table on this port. */
2301 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2302 rx_vft_set(pid, vlan_tags[i], 1);
2304 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2305 map_port_queue_stats_mapping_registers(pid, rte_port);
2307 rte_port->dcb_flag = 1;
2315 /* Configuration of Ethernet ports. */
2316 ports = rte_zmalloc("testpmd: ports",
2317 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2318 RTE_CACHE_LINE_SIZE);
2319 if (ports == NULL) {
2320 rte_exit(EXIT_FAILURE,
2321 "rte_zmalloc(%d struct rte_port) failed\n",
2337 const char clr[] = { 27, '[', '2', 'J', '\0' };
2338 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2340 /* Clear screen and move to top left */
2341 printf("%s%s", clr, top_left);
2343 printf("\nPort statistics ====================================");
2344 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2345 nic_stats_display(fwd_ports_ids[i]);
/*
 * SIGINT/SIGTERM handler: uninitialize pdump capture and latency stats
 * (when compiled in), flag forced termination, then re-raise the signal
 * with the default disposition so the process exits with the expected
 * signal status.
 */
2349 signal_handler(int signum)
2351 if (signum == SIGINT || signum == SIGTERM) {
2352 printf("\nSignal %d received, preparing to exit...\n",
2354 #ifdef RTE_LIBRTE_PDUMP
2355 /* uninitialize packet capture framework */
2358 #ifdef RTE_LIBRTE_LATENCY_STATS
2359 rte_latencystats_uninit();
2362 /* Set flag to indicate the force termination. */
2364 /* exit with the expected status */
2365 signal(signum, SIG_DFL);
2366 kill(getpid(), signum);
2371 main(int argc, char** argv)
2376 signal(SIGINT, signal_handler);
2377 signal(SIGTERM, signal_handler);
2379 diag = rte_eal_init(argc, argv);
2381 rte_panic("Cannot init EAL\n");
2383 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2384 RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2388 #ifdef RTE_LIBRTE_PDUMP
2389 /* initialize packet capture framework */
2390 rte_pdump_init(NULL);
2393 nb_ports = (portid_t) rte_eth_dev_count();
2395 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2397 /* allocate port structures, and init them */
2400 set_def_fwd_config();
2402 rte_panic("Empty set of forwarding logical cores - check the "
2403 "core mask supplied in the command parameters\n");
2405 /* Bitrate/latency stats disabled by default */
2406 #ifdef RTE_LIBRTE_BITRATE
2407 bitrate_enabled = 0;
2409 #ifdef RTE_LIBRTE_LATENCY_STATS
2410 latencystats_enabled = 0;
2416 launch_args_parse(argc, argv);
2418 if (tx_first && interactive)
2419 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2420 "interactive mode.\n");
2422 if (tx_first && lsc_interrupt) {
2423 printf("Warning: lsc_interrupt needs to be off when "
2424 " using tx_first. Disabling.\n");
2428 if (!nb_rxq && !nb_txq)
2429 printf("Warning: Either rx or tx queues should be non-zero\n");
2431 if (nb_rxq > 1 && nb_rxq > nb_txq)
2432 printf("Warning: nb_rxq=%d enables RSS configuration, "
2433 "but nb_txq=%d will prevent to fully test it.\n",
2437 if (start_port(RTE_PORT_ALL) != 0)
2438 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2440 /* set all ports to promiscuous mode by default */
2441 RTE_ETH_FOREACH_DEV(port_id)
2442 rte_eth_promiscuous_enable(port_id);
2444 /* Init metrics library */
2445 rte_metrics_init(rte_socket_id());
2447 #ifdef RTE_LIBRTE_LATENCY_STATS
2448 if (latencystats_enabled != 0) {
2449 int ret = rte_latencystats_init(1, NULL);
2451 printf("Warning: latencystats init()"
2452 " returned error %d\n", ret);
2453 printf("Latencystats running on lcore %d\n",
2454 latencystats_lcore_id);
2458 /* Setup bitrate stats */
2459 #ifdef RTE_LIBRTE_BITRATE
2460 if (bitrate_enabled != 0) {
2461 bitrate_data = rte_stats_bitrate_create();
2462 if (bitrate_data == NULL)
2463 rte_exit(EXIT_FAILURE,
2464 "Could not allocate bitrate data.\n");
2465 rte_stats_bitrate_reg(bitrate_data);
2469 #ifdef RTE_LIBRTE_CMDLINE
2470 if (strlen(cmdline_filename) != 0)
2471 cmdline_read_from_file(cmdline_filename);
2473 if (interactive == 1) {
2475 printf("Start automatic packet forwarding\n");
2476 start_packet_forwarding(0);
2488 printf("No commandline core given, start packet forwarding\n");
2489 start_packet_forwarding(tx_first);
2490 if (stats_period != 0) {
2491 uint64_t prev_time = 0, cur_time, diff_time = 0;
2492 uint64_t timer_period;
2494 /* Convert to number of cycles */
2495 timer_period = stats_period * rte_get_timer_hz();
2497 while (f_quit == 0) {
2498 cur_time = rte_get_timer_cycles();
2499 diff_time += cur_time - prev_time;
2501 if (diff_time >= timer_period) {
2503 /* Reset the timer */
2506 /* Sleep to avoid unnecessary checks */
2507 prev_time = cur_time;
2512 printf("Press enter to exit\n");
2513 rc = read(0, &c, 1);