4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
/*
 * Global testpmd configuration state, set from the command line / CLI and
 * consumed by the forwarding engines.  NOTE(review): this is a degraded
 * listing — several comment-block delimiters and initializer lines are
 * missing (gaps in the embedded original line numbers).
 */
78 uint16_t verbose_level = 0; /**< Silent by default. */
80 /* use master core for command line ? */
81 uint8_t interactive = 0;
84 * NUMA support configuration.
85 * When set, the NUMA support attempts to dispatch the allocation of the
86 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
87 * probed ports among the CPU sockets 0 and 1.
88 * Otherwise, all memory is allocated from CPU socket 0.
90 uint8_t numa_support = 0; /**< No numa support by default */
93 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
96 uint8_t socket_num = UMA_NO_CONFIG;
99 * Record the Ethernet address of peer target ports to which packets are
101 * Must be instanciated with the ethernet addresses of peer traffic generator
104 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
105 portid_t nb_peer_eth_addrs = 0;
108 * Probed Target Environment.
110 struct rte_port *ports; /**< For all probed ethernet ports. */
111 portid_t nb_ports; /**< Number of probed ethernet ports. */
112 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
113 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
116 * Test Forwarding Configuration.
117 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
118 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
120 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
121 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
122 portid_t nb_cfg_ports; /**< Number of configured ports. */
123 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
125 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
126 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
128 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
129 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
132 * Forwarding engines.
/* NOTE(review): most fwd_engines[] entries (io, mac, rx/tx-only, csum, ...)
 * are on lines missing from this listing; only the IEEE1588 entry is visible. */
134 struct fwd_engine * fwd_engines[] = {
140 #ifdef RTE_LIBRTE_IEEE1588
141 &ieee1588_fwd_engine,
146 struct fwd_config cur_fwd_config;
147 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
149 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
150 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
151 * specified on command-line. */
154 * Configuration of packet segments used by the "txonly" processing engine.
156 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
157 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
158 TXONLY_DEF_PACKET_LEN,
160 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
162 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
163 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
165 /* current configuration is in DCB or not,0 means it is not in DCB mode */
166 uint8_t dcb_config = 0;
168 /* Whether the dcb is in testing status */
169 uint8_t dcb_test = 0;
171 /* DCB on and VT on mapping is default */
172 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
175 * Configurable number of RX/TX queues.
177 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
178 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
181 * Configurable number of RX/TX ring descriptors.
183 #define RTE_TEST_RX_DESC_DEFAULT 128
184 #define RTE_TEST_TX_DESC_DEFAULT 512
185 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
186 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
189 * Configurable values of RX and TX ring threshold registers.
191 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
192 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
193 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
195 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
196 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
197 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
199 struct rte_eth_thresh rx_thresh = {
200 .pthresh = RX_PTHRESH,
201 .hthresh = RX_HTHRESH,
202 .wthresh = RX_WTHRESH,
205 struct rte_eth_thresh tx_thresh = {
206 .pthresh = TX_PTHRESH,
207 .hthresh = TX_HTHRESH,
208 .wthresh = TX_WTHRESH,
212 * Configurable value of RX free threshold.
214 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
217 * Configurable value of RX drop enable.
219 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
222 * Configurable value of TX free threshold.
224 uint16_t tx_free_thresh = 0; /* Use default values. */
227 * Configurable value of TX RS bit threshold.
229 uint16_t tx_rs_thresh = 0; /* Use default values. */
232 * Configurable value of TX queue flags.
234 uint32_t txq_flags = 0; /* No flags set. */
237 * Receive Side Scaling (RSS) configuration.
239 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
242 * Port topology configuration
244 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
247 * Avoids to flush all the RX streams before starts forwarding.
249 uint8_t no_flush_rx = 0; /* flush by default */
252 * NIC bypass mode configuration options.
254 #ifdef RTE_NIC_BYPASS
256 /* The NIC bypass watchdog timeout. */
257 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
262 * Ethernet device configuration.
264 struct rte_eth_rxmode rx_mode = {
265 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
267 .header_split = 0, /**< Header Split disabled. */
268 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
269 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
270 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
271 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
272 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
273 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
276 struct rte_fdir_conf fdir_conf = {
277 .mode = RTE_FDIR_MODE_NONE,
278 .pballoc = RTE_FDIR_PBALLOC_64K,
279 .status = RTE_FDIR_REPORT_STATUS,
280 .flexbytes_offset = 0x6,
/* test_done acts as the "forwarding stopped" flag: 1 = idle, 0 = running.
 * It is volatile because forwarding lcores poll it while the master sets it. */
284 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
286 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
287 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
289 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
290 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
292 uint16_t nb_tx_queue_stats_mappings = 0;
293 uint16_t nb_rx_queue_stats_mappings = 0;
295 /* Forward function declarations */
296 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
297 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
300 * Check if all the ports are started.
301 * If yes, return positive value. If not, return zero.
303 static int all_ports_started(void);
306 * Setup default configuration.
/*
 * Build the default forwarding lcore list: every enabled lcore except the
 * master core (kept for the interactive command line).  Sets nb_lcores and
 * nb_cfg_lcores.  NOTE(review): the 'continue;' bodies of the two guards
 * and the local declarations are on lines missing from this listing.
 */
309 set_default_fwd_lcores_config(void)
315 for (i = 0; i < RTE_MAX_LCORE; i++) {
316 if (! rte_lcore_is_enabled(i))
318 if (i == rte_get_master_lcore())
320 fwd_lcores_cpuids[nb_lc++] = i;
322 nb_lcores = (lcoreid_t) nb_lc;
323 nb_cfg_lcores = nb_lcores;
/*
 * Fill the default peer MAC table: a locally-administered address whose
 * last byte is the port index, for every possible port slot.
 */
328 set_def_peer_eth_addrs(void)
332 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
333 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
334 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default port configuration: forward on every probed port, using the
 * identity mapping port-index -> port-id.
 */
339 set_default_fwd_ports_config(void)
343 for (pt_id = 0; pt_id < nb_ports; pt_id++)
344 fwd_ports_ids[pt_id] = pt_id;
346 nb_cfg_ports = nb_ports;
347 nb_fwd_ports = nb_ports;
/* Apply all three default configurations (lcores, peer MACs, ports). */
351 set_def_fwd_config(void)
353 set_default_fwd_lcores_config();
354 set_def_peer_eth_addrs();
355 set_default_fwd_ports_config();
359 * Configuration initialisation done once at init time.
/* Opaque argument passed to testpmd_mbuf_ctor() for each mbuf. */
361 struct mbuf_ctor_arg {
362 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
363 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/* Opaque argument passed to testpmd_mbuf_pool_ctor() once per pool. */
366 struct mbuf_pool_ctor_arg {
367 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/*
 * Per-mbuf constructor installed via rte_mempool_create(): lay out the
 * data buffer after the rte_mbuf header and reset the packet metadata.
 */
371 testpmd_mbuf_ctor(struct rte_mempool *mp,
374 __attribute__((unused)) unsigned i)
376 struct mbuf_ctor_arg *mb_ctor_arg;
379 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
380 mb = (struct rte_mbuf *) raw_mbuf;
382 mb->type = RTE_MBUF_PKT;
/* buf_addr/buf_physaddr point just past the mbuf header, at seg_buf_offset. */
384 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
385 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
386 mb_ctor_arg->seg_buf_offset);
387 mb->buf_len = mb_ctor_arg->seg_buf_size;
/* NOTE(review): duplicate assignment — mb->type was already set above
 * (original line 382); this repeat is redundant and could be removed. */
388 mb->type = RTE_MBUF_PKT;
390 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
392 mb->pkt.vlan_macip.data = 0;
393 mb->pkt.hash.rss = 0;
/*
 * Pool constructor: record the per-mbuf data room size in the pool's
 * private area, after checking the private area is large enough.
 */
397 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
400 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
401 struct rte_pktmbuf_pool_private *mbp_priv;
403 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
404 printf("%s(%s) private_data_size %d < %d\n",
405 __func__, mp->name, (int) mp->private_data_size,
406 (int) sizeof(struct rte_pktmbuf_pool_private));
409 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
/* Private data lives immediately after the rte_mempool header. */
410 mbp_priv = (struct rte_pktmbuf_pool_private *)
411 ((char *)mp + sizeof(struct rte_mempool));
412 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
/*
 * Create one mbuf pool of nb_mbuf elements on the given socket.  The mbuf
 * element size is the cache-line-rounded header plus headroom plus the
 * requested segment size.  Exits the process on allocation failure.
 */
416 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
417 unsigned int socket_id)
419 char pool_name[RTE_MEMPOOL_NAMESIZE];
420 struct rte_mempool *rte_mp;
421 struct mbuf_pool_ctor_arg mbp_ctor_arg;
422 struct mbuf_ctor_arg mb_ctor_arg;
425 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
427 mb_ctor_arg.seg_buf_offset =
428 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
429 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
430 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
431 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
/* Constructors above wire the per-pool and per-mbuf initialization. */
432 rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
433 (unsigned) mb_mempool_cache,
434 sizeof(struct rte_pktmbuf_pool_private),
435 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
436 testpmd_mbuf_ctor, &mb_ctor_arg,
438 if (rte_mp == NULL) {
439 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
440 "failed\n", socket_id);
/*
 * init_config() body (the function header is on lines missing from this
 * listing): allocates the per-lcore and per-port state, creates the mbuf
 * pool(s) — one per socket under NUMA, one on socket 0 otherwise — and
 * initializes forwarding streams.  Exits the process on any failure.
 */
448 struct rte_port *port;
449 struct rte_mempool *mbp;
450 unsigned int nb_mbuf_per_pool;
452 uint8_t port_per_socket[MAX_SOCKET];
454 memset(port_per_socket,0,MAX_SOCKET);
455 /* Configuration of logical cores. */
456 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
457 sizeof(struct fwd_lcore *) * nb_lcores,
459 if (fwd_lcores == NULL) {
460 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
461 "failed\n", nb_lcores);
463 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
464 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
465 sizeof(struct fwd_lcore),
467 if (fwd_lcores[lc_id] == NULL) {
468 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
471 fwd_lcores[lc_id]->cpuid_idx = lc_id;
475 * Create pools of mbuf.
476 * If NUMA support is disabled, create a single pool of mbuf in
477 * socket 0 memory by default.
478 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
480 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
481 * nb_txd can be configured at run time.
483 if (param_total_num_mbufs)
484 nb_mbuf_per_pool = param_total_num_mbufs;
/* Default sizing: worst-case descriptors plus per-lcore caches and a burst. */
486 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
487 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
490 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
494 if (socket_num == UMA_NO_CONFIG)
495 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
497 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
501 * Records which Mbuf pool to use by each logical core, if needed.
503 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
504 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
/* Fall back to the socket-0 pool when the lcore's socket has none. */
506 mbp = mbuf_pool_find(0);
507 fwd_lcores[lc_id]->mbp = mbp;
510 /* Configuration of Ethernet ports. */
511 ports = rte_zmalloc("testpmd: ports",
512 sizeof(struct rte_port) * nb_ports,
515 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
516 "failed\n", nb_ports);
519 for (pid = 0; pid < nb_ports; pid++) {
521 rte_eth_dev_info_get(pid, &port->dev_info);
524 if (port_numa[pid] != NUMA_NO_CONFIG)
525 port_per_socket[port_numa[pid]]++;
527 uint32_t socket_id = rte_eth_dev_socket_id(pid);
528 port_per_socket[socket_id]++;
532 /* set flag to initialize port/queue */
533 port->need_reconfig = 1;
534 port->need_reconfig_queues = 1;
539 unsigned int nb_mbuf;
/* Under NUMA, split the total mbuf budget evenly across ports/sockets. */
541 if (param_total_num_mbufs)
542 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
544 for (i = 0; i < MAX_SOCKET; i++) {
545 nb_mbuf = (nb_mbuf_per_pool *
548 mbuf_pool_create(mbuf_data_size,
553 /* Configuration of packet forwarding streams. */
554 if (init_fwd_streams() < 0)
555 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * (Re)allocate the forwarding stream array: one stream per (port, rxq)
 * pair.  Validates nb_rxq/nb_txq against each port's device limits,
 * records each port's socket id, frees any previous stream array when the
 * count changed, and zero-allocates the new one.  Returns negative on a
 * validation failure (return statements are on lines missing here).
 */
559 init_fwd_streams(void)
562 struct rte_port *port;
563 streamid_t sm_id, nb_fwd_streams_new;
565 /* set socket id according to numa or not */
566 for (pid = 0; pid < nb_ports; pid++) {
568 if (nb_rxq > port->dev_info.max_rx_queues) {
569 printf("Fail: nb_rxq(%d) is greater than "
570 "max_rx_queues(%d)\n", nb_rxq,
571 port->dev_info.max_rx_queues);
574 if (nb_txq > port->dev_info.max_tx_queues) {
575 printf("Fail: nb_txq(%d) is greater than "
576 "max_tx_queues(%d)\n", nb_txq,
577 port->dev_info.max_tx_queues);
581 port->socket_id = rte_eth_dev_socket_id(pid);
583 if (socket_num == UMA_NO_CONFIG)
586 port->socket_id = socket_num;
590 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
/* Nothing to do when the stream count is unchanged. */
591 if (nb_fwd_streams_new == nb_fwd_streams)
594 if (fwd_streams != NULL) {
595 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
596 if (fwd_streams[sm_id] == NULL)
598 rte_free(fwd_streams[sm_id]);
599 fwd_streams[sm_id] = NULL;
601 rte_free(fwd_streams);
606 nb_fwd_streams = nb_fwd_streams_new;
607 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
608 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
609 if (fwd_streams == NULL)
610 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
611 "failed\n", nb_fwd_streams);
613 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
614 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
615 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
616 if (fwd_streams[sm_id] == NULL)
617 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
624 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Pretty-print the burst-size histogram for one direction ("RX" or "TX"):
 * total bursts plus the two most frequent burst sizes as percentages.
 * NOTE(review): total_burst initialization and the burst_stats[1]/[2]
 * maintenance branches are on lines missing from this listing.
 */
626 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
628 unsigned int total_burst;
629 unsigned int nb_burst;
630 unsigned int burst_stats[3];
631 uint16_t pktnb_stats[3];
633 int burst_percent[3];
636 * First compute the total number of packet bursts and the
637 * two highest numbers of bursts of the same number of packets.
640 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
641 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
642 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
643 nb_burst = pbs->pkt_burst_spread[nb_pkt];
646 total_burst += nb_burst;
/* New maximum: demote the previous best to second place. */
647 if (nb_burst > burst_stats[0]) {
648 burst_stats[1] = burst_stats[0];
649 pktnb_stats[1] = pktnb_stats[0];
650 burst_stats[0] = nb_burst;
651 pktnb_stats[0] = nb_pkt;
654 if (total_burst == 0)
656 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
657 printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
658 burst_percent[0], (int) pktnb_stats[0]);
659 if (burst_stats[0] == total_burst) {
663 if (burst_stats[0] + burst_stats[1] == total_burst) {
664 printf(" + %d%% of %d pkts]\n",
665 100 - burst_percent[0], pktnb_stats[1]);
668 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
669 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
670 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
671 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
674 printf(" + %d%% of %d pkts + %d%% of others]\n",
675 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
677 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Display forwarding statistics for one port.  Two layouts: a compact one
 * when neither RX nor TX queue-stats mapping is enabled, and a wider one
 * (plus per-stats-register breakdowns) otherwise.  Bad-checksum counters
 * are shown only for the csum forwarding engine.
 */
680 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
682 struct rte_port *port;
685 static const char *fwd_stats_border = "----------------------";
687 port = &ports[port_id];
688 printf("\n  %s Forward statistics for port %-2d %s\n",
689 fwd_stats_border, port_id, fwd_stats_border);
691 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
692 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
694 stats->ipackets, stats->ierrors,
695 (uint64_t) (stats->ipackets + stats->ierrors),
697 if (cur_fwd_eng == &csum_fwd_engine)
698 printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
699 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
701 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
703 stats->opackets, port->tx_dropped,
704 (uint64_t) (stats->opackets + port->tx_dropped));
706 if (stats->rx_nombuf > 0)
707 printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
/* Wide layout used when queue-stats mapping is enabled. */
711 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
713 stats->ipackets, stats->ierrors,
714 (uint64_t) (stats->ipackets + stats->ierrors));
716 if (cur_fwd_eng == &csum_fwd_engine)
717 printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
718 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
720 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
722 stats->opackets, port->tx_dropped,
723 (uint64_t) (stats->opackets + port->tx_dropped));
725 if (stats->rx_nombuf > 0)
726 printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
728 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
730 pkt_burst_stats_display("RX",
731 &port->rx_stream->rx_burst_stats);
733 pkt_burst_stats_display("TX",
734 &port->tx_stream->tx_burst_stats);
737 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
738 printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
742 if (port->rx_queue_stats_mapping_enabled) {
744 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
745 printf("  Stats reg %2d RX-packets:%14"PRIu64
746 "     RX-errors:%14"PRIu64
747 "    RX-bytes:%14"PRIu64"\n",
748 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
752 if (port->tx_queue_stats_mapping_enabled) {
753 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
754 printf("  Stats reg %2d TX-packets:%14"PRIu64
755 "                                 TX-bytes:%14"PRIu64"\n",
756 i, stats->q_opackets[i], stats->q_obytes[i]);
760 printf("  %s--------------------------------%s\n",
761 fwd_stats_border, fwd_stats_border);
/*
 * Display statistics for one forwarding stream (an RX port/queue ->
 * TX port/queue pair).  Streams that saw no traffic at all are skipped
 * (the early 'return;' is on a line missing from this listing).
 */
765 fwd_stream_stats_display(streamid_t stream_id)
767 struct fwd_stream *fs;
768 static const char *fwd_top_stats_border = "-------";
770 fs = fwd_streams[stream_id];
771 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
772 (fs->fwd_dropped == 0))
774 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
775 "TX Port=%2d/Queue=%2d %s\n",
776 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
777 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
778 printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
779 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
781 /* if checksum mode */
782 if (cur_fwd_eng == &csum_fwd_engine) {
783 printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
784 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
787 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
788 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
789 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain stale packets from every forwarding RX queue before a run starts,
 * so counters reflect only the new traffic.  Two passes with a 10 ms pause
 * between them to catch packets in flight.
 */
794 flush_fwd_rx_queues(void)
796 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
804 for (j = 0; j < 2; j++) {
805 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
806 for (rxq = 0; rxq < nb_rxq; rxq++) {
807 port_id = fwd_ports_ids[rxp];
/* Keep receiving until the queue is empty (loop header on a missing line). */
809 nb_rx = rte_eth_rx_burst(port_id, rxq,
810 pkts_burst, MAX_PKT_BURST);
811 for (i = 0; i < nb_rx; i++)
812 rte_pktmbuf_free(pkts_burst[i]);
816 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Forwarding loop for one lcore: repeatedly run the engine's packet_fwd
 * callback over every stream assigned to this lcore until fc->stopped is
 * set by stop_packet_forwarding().
 */
821 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
823 struct fwd_stream **fsm;
827 fsm = &fwd_streams[fc->stream_idx];
828 nb_fs = fc->stream_nb;
830 for (sm_id = 0; sm_id < nb_fs; sm_id++)
831 (*pkt_fwd)(fsm[sm_id]);
832 } while (! fc->stopped);
/* lcore entry point: run the currently configured forwarding engine. */
836 start_pkt_forward_on_core(void *fwd_arg)
838 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
839 cur_fwd_config.fwd_eng->packet_fwd);
844 * Run the TXONLY packet forwarding engine to send a single burst of packets.
845 * Used to start communication flows in network loopback test configurations.
848 run_one_txonly_burst_on_core(void *fwd_arg)
850 struct fwd_lcore *fwd_lc;
851 struct fwd_lcore tmp_lcore;
853 fwd_lc = (struct fwd_lcore *) fwd_arg;
855 tmp_lcore.stopped = 1;
856 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
861 * Launch packet forwarding:
862 * - Setup per-port forwarding context.
863 * - launch logical cores with their forwarding configuration.
866 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
868 port_fwd_begin_t port_fwd_begin;
873 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
874 if (port_fwd_begin != NULL) {
875 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
876 (*port_fwd_begin)(fwd_ports_ids[i]);
878 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
879 lc_id = fwd_lcores_cpuids[i];
880 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
881 fwd_lcores[i]->stopped = 0;
882 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
883 fwd_lcores[i], lc_id);
885 printf("launch lcore %u failed - diag=%d\n",
892 * Launch packet forwarding configuration.
895 start_packet_forwarding(int with_tx_first)
897 port_fwd_begin_t port_fwd_begin;
898 port_fwd_end_t port_fwd_end;
899 struct rte_port *port;
904 if (all_ports_started() == 0) {
905 printf("Not all ports were started\n");
908 if (test_done == 0) {
909 printf("Packet forwarding already started\n");
913 for (i = 0; i < nb_fwd_ports; i++) {
914 pt_id = fwd_ports_ids[i];
915 port = &ports[pt_id];
916 if (!port->dcb_flag) {
917 printf("In DCB mode, all forwarding ports must "
918 "be configured in this mode.\n");
922 if (nb_fwd_lcores == 1) {
923 printf("In DCB mode,the nb forwarding cores "
924 "should be larger than 1.\n");
931 flush_fwd_rx_queues();
934 rxtx_config_display();
936 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
937 pt_id = fwd_ports_ids[i];
938 port = &ports[pt_id];
939 rte_eth_stats_get(pt_id, &port->stats);
940 port->tx_dropped = 0;
942 map_port_queue_stats_mapping_registers(pt_id, port);
944 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
945 fwd_streams[sm_id]->rx_packets = 0;
946 fwd_streams[sm_id]->tx_packets = 0;
947 fwd_streams[sm_id]->fwd_dropped = 0;
948 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
949 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
951 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
952 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
953 sizeof(fwd_streams[sm_id]->rx_burst_stats));
954 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
955 sizeof(fwd_streams[sm_id]->tx_burst_stats));
957 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
958 fwd_streams[sm_id]->core_cycles = 0;
962 port_fwd_begin = tx_only_engine.port_fwd_begin;
963 if (port_fwd_begin != NULL) {
964 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
965 (*port_fwd_begin)(fwd_ports_ids[i]);
967 launch_packet_forwarding(run_one_txonly_burst_on_core);
968 rte_eal_mp_wait_lcore();
969 port_fwd_end = tx_only_engine.port_fwd_end;
970 if (port_fwd_end != NULL) {
971 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
972 (*port_fwd_end)(fwd_ports_ids[i]);
975 launch_packet_forwarding(start_pkt_forward_on_core);
979 stop_packet_forwarding(void)
981 struct rte_eth_stats stats;
982 struct rte_port *port;
983 port_fwd_end_t port_fwd_end;
990 uint64_t total_rx_dropped;
991 uint64_t total_tx_dropped;
992 uint64_t total_rx_nombuf;
994 uint64_t rx_bad_ip_csum;
995 uint64_t rx_bad_l4_csum;
996 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
999 static const char *acc_stats_border = "+++++++++++++++";
1001 if (all_ports_started() == 0) {
1002 printf("Not all ports were started\n");
1006 printf("Packet forwarding not started\n");
1009 printf("Telling cores to stop...");
1010 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1011 fwd_lcores[lc_id]->stopped = 1;
1012 printf("\nWaiting for lcores to finish...\n");
1013 rte_eal_mp_wait_lcore();
1014 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1015 if (port_fwd_end != NULL) {
1016 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1017 pt_id = fwd_ports_ids[i];
1018 (*port_fwd_end)(pt_id);
1021 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1024 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1025 if (cur_fwd_config.nb_fwd_streams >
1026 cur_fwd_config.nb_fwd_ports) {
1027 fwd_stream_stats_display(sm_id);
1028 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1029 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1031 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1033 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1036 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1037 tx_dropped = (uint64_t) (tx_dropped +
1038 fwd_streams[sm_id]->fwd_dropped);
1039 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1042 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1043 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1044 fwd_streams[sm_id]->rx_bad_ip_csum);
1045 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1049 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1050 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1051 fwd_streams[sm_id]->rx_bad_l4_csum);
1052 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1055 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1056 fwd_cycles = (uint64_t) (fwd_cycles +
1057 fwd_streams[sm_id]->core_cycles);
1062 total_rx_dropped = 0;
1063 total_tx_dropped = 0;
1064 total_rx_nombuf = 0;
1065 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1066 pt_id = fwd_ports_ids[i];
1068 port = &ports[pt_id];
1069 rte_eth_stats_get(pt_id, &stats);
1070 stats.ipackets -= port->stats.ipackets;
1071 port->stats.ipackets = 0;
1072 stats.opackets -= port->stats.opackets;
1073 port->stats.opackets = 0;
1074 stats.ibytes -= port->stats.ibytes;
1075 port->stats.ibytes = 0;
1076 stats.obytes -= port->stats.obytes;
1077 port->stats.obytes = 0;
1078 stats.ierrors -= port->stats.ierrors;
1079 port->stats.ierrors = 0;
1080 stats.oerrors -= port->stats.oerrors;
1081 port->stats.oerrors = 0;
1082 stats.rx_nombuf -= port->stats.rx_nombuf;
1083 port->stats.rx_nombuf = 0;
1084 stats.fdirmatch -= port->stats.fdirmatch;
1085 port->stats.rx_nombuf = 0;
1086 stats.fdirmiss -= port->stats.fdirmiss;
1087 port->stats.rx_nombuf = 0;
1089 total_recv += stats.ipackets;
1090 total_xmit += stats.opackets;
1091 total_rx_dropped += stats.ierrors;
1092 total_tx_dropped += port->tx_dropped;
1093 total_rx_nombuf += stats.rx_nombuf;
1095 fwd_port_stats_display(pt_id, &stats);
1097 printf("\n %s Accumulated forward statistics for all ports"
1099 acc_stats_border, acc_stats_border);
1100 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1102 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1104 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1105 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1106 if (total_rx_nombuf > 0)
1107 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1108 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1110 acc_stats_border, acc_stats_border);
1111 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1113 printf("\n CPU cycles/packet=%u (total cycles="
1114 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1115 (unsigned int)(fwd_cycles / total_recv),
1116 fwd_cycles, total_recv);
1118 printf("\nDone.\n");
/*
 * Return non-zero when every probed port is in RTE_PORT_STARTED state,
 * zero as soon as one port is found not started (the 'return 0;' and
 * final 'return 1;' are on lines missing from this listing).
 */
1123 all_ports_started(void)
1126 struct rte_port *port;
1128 for (pi = 0; pi < nb_ports; pi++) {
1130 /* Check if there is a port which is not started */
1131 if (port->port_status != RTE_PORT_STARTED)
1135 /* No port is not started */
/*
 * Start one port (or all ports when pid >= nb_ports): reconfigure the
 * device and its RX/TX queues when flagged, then rte_eth_dev_start() it.
 * Port state transitions are guarded with rte_atomic16_cmpset() on
 * port_status; on any setup failure the reconfig flags are re-armed so
 * the next attempt retries.  Finally waits for link-up on started ports.
 */
1140 start_port(portid_t pid)
1142 int diag, need_check_link_status = 0;
1145 struct rte_port *port;
1147 if (test_done == 0) {
1148 printf("Please stop forwarding first\n");
1152 if (init_fwd_streams() < 0) {
1153 printf("Fail from init_fwd_streams()\n");
1159 for (pi = 0; pi < nb_ports; pi++) {
/* pid < nb_ports selects a single port; otherwise all ports are handled. */
1160 if (pid < nb_ports && pid != pi)
1164 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1165 RTE_PORT_HANDLING) == 0) {
1166 printf("Port %d is now not stopped\n", pi);
1170 if (port->need_reconfig > 0) {
1171 port->need_reconfig = 0;
1173 printf("Configuring Port %d (socket %d)\n", pi,
1174 rte_eth_dev_socket_id(pi));
1175 /* configure port */
1176 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1179 if (rte_atomic16_cmpset(&(port->port_status),
1180 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1181 printf("Port %d can not be set back "
1182 "to stopped\n", pi);
1183 printf("Fail to configure port %d\n", pi);
1184 /* try to reconfigure port next time */
1185 port->need_reconfig = 1;
1189 if (port->need_reconfig_queues > 0) {
1190 port->need_reconfig_queues = 0;
1191 /* setup tx queues */
1192 for (qi = 0; qi < nb_txq; qi++) {
/* Prefer the user-specified TX ring socket over the port's own socket. */
1193 if ((numa_support) &&
1194 (txring_numa[pi] != NUMA_NO_CONFIG))
1195 diag = rte_eth_tx_queue_setup(pi, qi,
1196 nb_txd,txring_numa[pi],
1199 diag = rte_eth_tx_queue_setup(pi, qi,
1200 nb_txd,port->socket_id,
1206 /* Fail to setup tx queue, return */
1207 if (rte_atomic16_cmpset(&(port->port_status),
1209 RTE_PORT_STOPPED) == 0)
1210 printf("Port %d can not be set back "
1211 "to stopped\n", pi);
1212 printf("Fail to configure port %d tx queues\n", pi);
1213 /* try to reconfigure queues next time */
1214 port->need_reconfig_queues = 1;
1217 /* setup rx queues */
1218 for (qi = 0; qi < nb_rxq; qi++) {
1219 if ((numa_support) &&
1220 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1221 struct rte_mempool * mp =
1222 mbuf_pool_find(rxring_numa[pi]);
1224 printf("Failed to setup RX queue:"
1225 "No mempool allocation"
1226 "on the socket %d\n",
1231 diag = rte_eth_rx_queue_setup(pi, qi,
1232 nb_rxd,rxring_numa[pi],
1233 &(port->rx_conf),mp);
1236 diag = rte_eth_rx_queue_setup(pi, qi,
1237 nb_rxd,port->socket_id,
1239 mbuf_pool_find(port->socket_id));
1245 /* Fail to setup rx queue, return */
1246 if (rte_atomic16_cmpset(&(port->port_status),
1248 RTE_PORT_STOPPED) == 0)
1249 printf("Port %d can not be set back "
1250 "to stopped\n", pi);
1251 printf("Fail to configure port %d rx queues\n", pi);
1252 /* try to reconfigure queues next time */
1253 port->need_reconfig_queues = 1;
1258 if (rte_eth_dev_start(pi) < 0) {
1259 printf("Fail to start port %d\n", pi);
1261 /* Fail to setup rx queue, return */
1262 if (rte_atomic16_cmpset(&(port->port_status),
1263 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1264 printf("Port %d can not be set back to "
1269 if (rte_atomic16_cmpset(&(port->port_status),
1270 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1271 printf("Port %d can not be set into started\n", pi);
1273 /* at least one port started, need checking link status */
1274 need_check_link_status = 1;
1277 if (need_check_link_status)
1278 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1280 printf("Please stop the ports first\n");
1286 stop_port(portid_t pid)
1289 struct rte_port *port;
1290 int need_check_link_status = 0;
1292 if (test_done == 0) {
1293 printf("Please stop forwarding first\n");
1300 printf("Stopping ports...\n");
1302 for (pi = 0; pi < nb_ports; pi++) {
1303 if (pid < nb_ports && pid != pi)
1307 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1308 RTE_PORT_HANDLING) == 0)
1311 rte_eth_dev_stop(pi);
1313 if (rte_atomic16_cmpset(&(port->port_status),
1314 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1315 printf("Port %d can not be set into stopped\n", pi);
1316 need_check_link_status = 1;
1318 if (need_check_link_status)
1319 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1325 close_port(portid_t pid)
1328 struct rte_port *port;
1330 if (test_done == 0) {
1331 printf("Please stop forwarding first\n");
1335 printf("Closing ports...\n");
1337 for (pi = 0; pi < nb_ports; pi++) {
1338 if (pid < nb_ports && pid != pi)
1342 if (rte_atomic16_cmpset(&(port->port_status),
1343 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1344 printf("Port %d is now not stopped\n", pi);
1348 rte_eth_dev_close(pi);
1350 if (rte_atomic16_cmpset(&(port->port_status),
1351 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1352 printf("Port %d can not be set into stopped\n", pi);
1359 all_ports_stopped(void)
1362 struct rte_port *port;
1364 for (pi = 0; pi < nb_ports; pi++) {
1366 if (port->port_status != RTE_PORT_STOPPED)
1378 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1379 printf("Stopping port %d...", pt_id);
1381 rte_eth_dev_close(pt_id);
1387 typedef void (*cmd_func_t)(void);
1388 struct pmd_test_command {
1389 const char *cmd_name;
1390 cmd_func_t cmd_func;
1393 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1395 /* Check the link status of all ports in up to 9s, and print them finally */
1397 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1399 #define CHECK_INTERVAL 100 /* 100ms */
1400 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1401 uint8_t portid, count, all_ports_up, print_flag = 0;
1402 struct rte_eth_link link;
1404 printf("Checking link statuses...\n");
1406 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1408 for (portid = 0; portid < port_num; portid++) {
1409 if ((port_mask & (1 << portid)) == 0)
1411 memset(&link, 0, sizeof(link));
1412 rte_eth_link_get_nowait(portid, &link);
1413 /* print link status if flag set */
1414 if (print_flag == 1) {
1415 if (link.link_status)
1416 printf("Port %d Link Up - speed %u "
1417 "Mbps - %s\n", (uint8_t)portid,
1418 (unsigned)link.link_speed,
1419 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1420 ("full-duplex") : ("half-duplex\n"));
1422 printf("Port %d Link Down\n",
1426 /* clear all_ports_up flag if any link down */
1427 if (link.link_status == 0) {
1432 /* after finally printing all link status, get out */
1433 if (print_flag == 1)
1436 if (all_ports_up == 0) {
1438 rte_delay_ms(CHECK_INTERVAL);
1441 /* set the print_flag if all ports up or timeout */
1442 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1449 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1453 uint8_t mapping_found = 0;
1455 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1456 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1457 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1458 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1459 tx_queue_stats_mappings[i].queue_id,
1460 tx_queue_stats_mappings[i].stats_counter_id);
1467 port->tx_queue_stats_mapping_enabled = 1;
1472 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1476 uint8_t mapping_found = 0;
1478 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1479 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1480 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1481 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1482 rx_queue_stats_mappings[i].queue_id,
1483 rx_queue_stats_mappings[i].stats_counter_id);
1490 port->rx_queue_stats_mapping_enabled = 1;
1495 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1499 diag = set_tx_queue_stats_mapping_registers(pi, port);
1501 if (diag == -ENOTSUP) {
1502 port->tx_queue_stats_mapping_enabled = 0;
1503 printf("TX queue stats mapping not supported port id=%d\n", pi);
1506 rte_exit(EXIT_FAILURE,
1507 "set_tx_queue_stats_mapping_registers "
1508 "failed for port id=%d diag=%d\n",
1512 diag = set_rx_queue_stats_mapping_registers(pi, port);
1514 if (diag == -ENOTSUP) {
1515 port->rx_queue_stats_mapping_enabled = 0;
1516 printf("RX queue stats mapping not supported port id=%d\n", pi);
1519 rte_exit(EXIT_FAILURE,
1520 "set_rx_queue_stats_mapping_registers "
1521 "failed for port id=%d diag=%d\n",
1527 init_port_config(void)
1530 struct rte_port *port;
1532 for (pid = 0; pid < nb_ports; pid++) {
1534 port->dev_conf.rxmode = rx_mode;
1535 port->dev_conf.fdir_conf = fdir_conf;
1537 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1538 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1540 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1541 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1543 port->rx_conf.rx_thresh = rx_thresh;
1544 port->rx_conf.rx_free_thresh = rx_free_thresh;
1545 port->rx_conf.rx_drop_en = rx_drop_en;
1546 port->tx_conf.tx_thresh = tx_thresh;
1547 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1548 port->tx_conf.tx_free_thresh = tx_free_thresh;
1549 port->tx_conf.txq_flags = txq_flags;
1551 rte_eth_macaddr_get(pid, &port->eth_addr);
1553 map_port_queue_stats_mapping_registers(pid, port);
1554 #ifdef RTE_NIC_BYPASS
1555 rte_eth_dev_bypass_init(pid);
1560 const uint16_t vlan_tags[] = {
1561 0, 1, 2, 3, 4, 5, 6, 7,
1562 8, 9, 10, 11, 12, 13, 14, 15,
1563 16, 17, 18, 19, 20, 21, 22, 23,
1564 24, 25, 26, 27, 28, 29, 30, 31
1568 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1573 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1574 * given above, and the number of traffic classes available for use.
1576 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1577 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1578 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1580 /* VMDQ+DCB RX and TX configrations */
1581 vmdq_rx_conf.enable_default_pool = 0;
1582 vmdq_rx_conf.default_pool = 0;
1583 vmdq_rx_conf.nb_queue_pools =
1584 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1585 vmdq_tx_conf.nb_queue_pools =
1586 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1588 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1589 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1590 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1591 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1593 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1594 vmdq_rx_conf.dcb_queue[i] = i;
1595 vmdq_tx_conf.dcb_queue[i] = i;
1598 /*set DCB mode of RX and TX of multiple queues*/
1599 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1600 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1601 if (dcb_conf->pfc_en)
1602 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1604 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1606 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1607 sizeof(struct rte_eth_vmdq_dcb_conf)));
1608 (void)(rte_memcpy(ð_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1609 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1612 struct rte_eth_dcb_rx_conf rx_conf;
1613 struct rte_eth_dcb_tx_conf tx_conf;
1615 /* queue mapping configuration of DCB RX and TX */
1616 if (dcb_conf->num_tcs == ETH_4_TCS)
1617 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1619 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1621 rx_conf.nb_tcs = dcb_conf->num_tcs;
1622 tx_conf.nb_tcs = dcb_conf->num_tcs;
1624 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1625 rx_conf.dcb_queue[i] = i;
1626 tx_conf.dcb_queue[i] = i;
1628 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1629 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1630 if (dcb_conf->pfc_en)
1631 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1633 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1635 (void)(rte_memcpy(ð_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1636 sizeof(struct rte_eth_dcb_rx_conf)));
1637 (void)(rte_memcpy(ð_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1638 sizeof(struct rte_eth_dcb_tx_conf)));
1645 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1647 struct rte_eth_conf port_conf;
1648 struct rte_port *rte_port;
1653 /* rxq and txq configuration in dcb mode */
1656 rx_free_thresh = 64;
1658 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1659 /* Enter DCB configuration status */
1662 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1663 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1664 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1668 rte_port = &ports[pid];
1669 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1671 rte_port->rx_conf.rx_thresh = rx_thresh;
1672 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1673 rte_port->tx_conf.tx_thresh = tx_thresh;
1674 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1675 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1677 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1678 for (i = 0; i < nb_vlan; i++){
1679 rx_vft_set(pid, vlan_tags[i], 1);
1682 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1683 map_port_queue_stats_mapping_registers(pid, rte_port);
1685 rte_port->dcb_flag = 1;
1690 #ifdef RTE_EXEC_ENV_BAREMETAL
1695 main(int argc, char** argv)
1700 diag = rte_eal_init(argc, argv);
1702 rte_panic("Cannot init EAL\n");
1704 if (rte_pmd_init_all())
1705 rte_panic("Cannot init PMD\n");
1707 if (rte_eal_pci_probe())
1708 rte_panic("Cannot probe PCI\n");
1710 nb_ports = (portid_t) rte_eth_dev_count();
1712 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1714 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1715 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1716 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1717 "configuration file\n");
1719 set_def_fwd_config();
1721 rte_panic("Empty set of forwarding logical cores - check the "
1722 "core mask supplied in the command parameters\n");
1727 launch_args_parse(argc, argv);
1729 if (nb_rxq > nb_txq)
1730 printf("Warning: nb_rxq=%d enables RSS configuration, "
1731 "but nb_txq=%d will prevent to fully test it.\n",
1735 start_port(RTE_PORT_ALL);
1737 /* set all ports to promiscuous mode by default */
1738 for (port_id = 0; port_id < nb_ports; port_id++)
1739 rte_eth_promiscuous_enable(port_id);
1741 if (interactive == 1)
1747 printf("No commandline core given, start packet forwarding\n");
1748 start_packet_forwarding(0);
1749 printf("Press enter to exit\n");
1750 rc = read(0, &c, 1);