4 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
/* Global run-time configuration of testpmd.  The values below are the
 * built-in defaults; most of them can be overridden by command-line
 * options parsed at start-up. */
78 uint16_t verbose_level = 0; /**< Silent by default. */
80 /* use master core for command line ? */
81 uint8_t interactive = 0;
84 * NUMA support configuration.
85 * When set, the NUMA support attempts to dispatch the allocation of the
86 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
87 * probed ports among the CPU sockets 0 and 1.
88 * Otherwise, all memory is allocated from CPU socket 0.
90 uint8_t numa_support = 0; /**< No numa support by default */
93 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
96 uint8_t socket_num = UMA_NO_CONFIG;
99 * Record the Ethernet address of peer target ports to which packets are
101 * Must be instantiated with the ethernet addresses of peer traffic generator
104 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
105 portid_t nb_peer_eth_addrs = 0;
108 * Probed Target Environment.
110 struct rte_port *ports; /**< For all probed ethernet ports. */
111 portid_t nb_ports; /**< Number of probed ethernet ports. */
112 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
113 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
116 * Test Forwarding Configuration.
117 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
118 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
120 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
121 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
122 portid_t nb_cfg_ports; /**< Number of configured ports. */
123 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
125 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
126 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
128 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
129 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
132 * Forwarding engines.
134 struct fwd_engine * fwd_engines[] = {
140 #ifdef RTE_LIBRTE_IEEE1588
141 &ieee1588_fwd_engine,
146 struct fwd_config cur_fwd_config;
147 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
149 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
150 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
151 * specified on command-line. */
154 * Configuration of packet segments used by the "txonly" processing engine.
156 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
157 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
158 TXONLY_DEF_PACKET_LEN,
160 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
162 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
163 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
165 /* current configuration is in DCB or not, 0 means it is not in DCB mode */
166 uint8_t dcb_config = 0;
168 /* Whether the dcb is in testing status */
169 uint8_t dcb_test = 0;
171 /* DCB on and VT on mapping is default */
172 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
175 * Configurable number of RX/TX queues.
177 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
178 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
181 * Configurable number of RX/TX ring descriptors.
183 #define RTE_TEST_RX_DESC_DEFAULT 128
184 #define RTE_TEST_TX_DESC_DEFAULT 512
185 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
186 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
189 * Configurable values of RX and TX ring threshold registers.
191 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
192 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
193 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
195 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
196 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
197 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
/* Default RX/TX ring threshold register values, built from the
 * *_PTHRESH/*_HTHRESH/*_WTHRESH defines above; applied at queue setup. */
199 struct rte_eth_thresh rx_thresh = {
200 .pthresh = RX_PTHRESH,
201 .hthresh = RX_HTHRESH,
202 .wthresh = RX_WTHRESH,
205 struct rte_eth_thresh tx_thresh = {
206 .pthresh = TX_PTHRESH,
207 .hthresh = TX_HTHRESH,
208 .wthresh = TX_WTHRESH,
212 * Configurable value of RX free threshold.
214 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
217 * Configurable value of RX drop enable.
219 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
222 * Configurable value of TX free threshold.
224 uint16_t tx_free_thresh = 0; /* Use default values. */
227 * Configurable value of TX RS bit threshold.
229 uint16_t tx_rs_thresh = 0; /* Use default values. */
232 * Configurable value of TX queue flags.
234 uint32_t txq_flags = 0; /* No flags set. */
237 * Receive Side Scaling (RSS) configuration.
239 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
242 * Port topology configuration
244 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
248 * Avoid flushing all the RX streams before starting forwarding.
250 uint8_t no_flush_rx = 0; /* flush by default */
253 * Ethernet device configuration.
255 struct rte_eth_rxmode rx_mode = {
256 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
258 .header_split = 0, /**< Header Split disabled. */
259 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
260 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
261 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
262 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
263 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
264 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
/* Flow Director disabled by default; flexbytes offset 0x6 when enabled. */
267 struct rte_fdir_conf fdir_conf = {
268 .mode = RTE_FDIR_MODE_NONE,
269 .pballoc = RTE_FDIR_PBALLOC_64K,
270 .status = RTE_FDIR_REPORT_STATUS,
271 .flexbytes_offset = 0x6,
275 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
/* Per-queue statistics-register mapping tables, selectable from the CLI. */
277 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
278 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
280 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
281 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
283 uint16_t nb_tx_queue_stats_mappings = 0;
284 uint16_t nb_rx_queue_stats_mappings = 0;
286 /* Forward function declarations */
287 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
288 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
291 * Check if all the ports are started.
292 * If yes, return positive value. If not, return zero.
294 static int all_ports_started(void);
297 * Setup default configuration.
/*
 * Build the default list of forwarding lcores: every lcore enabled in the
 * EAL coremask except the master lcore (kept free for the command line).
 * Updates fwd_lcores_cpuids[], nb_lcores and nb_cfg_lcores.
 */
300 set_default_fwd_lcores_config(void)
306 for (i = 0; i < RTE_MAX_LCORE; i++) {
/* skip lcores not enabled in the EAL configuration */
307 if (! rte_lcore_is_enabled(i))
/* master lcore is reserved for the interactive command line */
309 if (i == rte_get_master_lcore())
311 fwd_lcores_cpuids[nb_lc++] = i;
313 nb_lcores = (lcoreid_t) nb_lc;
314 nb_cfg_lcores = nb_lcores;
/*
 * Fill peer_eth_addrs[] with default locally-administered MAC addresses:
 * first byte set to ETHER_LOCAL_ADMIN_ADDR, last byte set to the port index.
 */
319 set_def_peer_eth_addrs(void)
323 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
324 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
325 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default forwarding-port configuration: forward on every probed port,
 * in probe order (fwd_ports_ids[i] == i).
 */
330 set_default_fwd_ports_config(void)
334 for (pt_id = 0; pt_id < nb_ports; pt_id++)
335 fwd_ports_ids[pt_id] = pt_id;
337 nb_cfg_ports = nb_ports;
338 nb_fwd_ports = nb_ports;
/* Reset lcores, peer MAC addresses and ports to their default configuration. */
342 set_def_fwd_config(void)
344 set_default_fwd_lcores_config();
345 set_def_peer_eth_addrs();
346 set_default_fwd_ports_config();
350 * Configuration initialisation done once at init time.
/* Argument passed to testpmd_mbuf_ctor() for each mbuf of a pool. */
352 struct mbuf_ctor_arg {
353 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
354 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/* Argument passed to testpmd_mbuf_pool_ctor() when the pool is created. */
357 struct mbuf_pool_ctor_arg {
358 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/*
 * Per-mbuf constructor used by mbuf_pool_create(): initializes the buffer
 * address/length fields of each raw mbuf element from the offsets
 * precomputed in the mbuf_ctor_arg passed as opaque_arg.
 */
362 testpmd_mbuf_ctor(struct rte_mempool *mp,
365 __attribute__((unused)) unsigned i)
367 struct mbuf_ctor_arg *mb_ctor_arg;
370 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
371 mb = (struct rte_mbuf *) raw_mbuf;
373 mb->type = RTE_MBUF_PKT;
/* the data buffer lives inside the mempool element, right after the
 * (cache-line rounded) rte_mbuf header */
375 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
376 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
377 mb_ctor_arg->seg_buf_offset);
378 mb->buf_len = mb_ctor_arg->seg_buf_size;
/* NOTE(review): a second, redundant "mb->type = RTE_MBUF_PKT;" assignment
 * that exactly duplicated the one above was removed here; nothing between
 * the two assignments touched mb->type. */
381 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
383 mb->pkt.vlan_macip.data = 0;
384 mb->pkt.hash.rss = 0;
/*
 * Pool constructor used by mbuf_pool_create(): stores the per-mbuf data
 * room size in the mempool private area so that PMDs can retrieve it.
 */
388 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
391 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
392 struct rte_pktmbuf_pool_private *mbp_priv;
/* the pool must have been created with enough private data space */
394 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
395 printf("%s(%s) private_data_size %d < %d\n",
396 __func__, mp->name, (int) mp->private_data_size,
397 (int) sizeof(struct rte_pktmbuf_pool_private));
400 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
/* the private data area is laid out immediately after the mempool header */
401 mbp_priv = (struct rte_pktmbuf_pool_private *)
402 ((char *)mp + sizeof(struct rte_mempool));
403 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
/*
 * Create the mbuf pool for the given socket: compute the per-element size
 * (cache-line rounded mbuf header + headroom + data room), build a
 * per-socket pool name and call rte_mempool_create() with the two
 * constructors above.  Exits the application on allocation failure.
 */
407 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
408 unsigned int socket_id)
410 char pool_name[RTE_MEMPOOL_NAMESIZE];
411 struct rte_mempool *rte_mp;
412 struct mbuf_pool_ctor_arg mbp_ctor_arg;
413 struct mbuf_ctor_arg mb_ctor_arg;
/* data room = packet headroom + requested segment size */
416 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
/* data segment starts on a cache-line boundary after the mbuf header */
418 mb_ctor_arg.seg_buf_offset =
419 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
420 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
421 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
422 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
423 rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
424 (unsigned) mb_mempool_cache,
425 sizeof(struct rte_pktmbuf_pool_private),
426 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
427 testpmd_mbuf_ctor, &mb_ctor_arg,
429 if (rte_mp == NULL) {
430 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
431 "failed\n", socket_id);
/* NOTE(review): this appears to be the body of the one-time init routine
 * (presumably init_config(); its opening lines are not visible in this
 * chunk).  It allocates the fwd_lcores and ports arrays, creates the mbuf
 * pool(s) and initializes the forwarding streams — confirm against the
 * full file. */
439 struct rte_port *port;
440 struct rte_mempool *mbp;
441 unsigned int nb_mbuf_per_pool;
443 uint8_t port_per_socket[MAX_SOCKET];
445 memset(port_per_socket,0,MAX_SOCKET);
446 /* Configuration of logical cores. */
447 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
448 sizeof(struct fwd_lcore *) * nb_lcores,
450 if (fwd_lcores == NULL) {
451 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
452 "failed\n", nb_lcores);
454 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
455 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
456 sizeof(struct fwd_lcore),
458 if (fwd_lcores[lc_id] == NULL) {
459 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
462 fwd_lcores[lc_id]->cpuid_idx = lc_id;
466 * Create pools of mbuf.
467 * If NUMA support is disabled, create a single pool of mbuf in
468 * socket 0 memory by default.
469 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
471 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
472 * nb_txd can be configured at run time.
474 if (param_total_num_mbufs)
475 nb_mbuf_per_pool = param_total_num_mbufs;
/* default sizing: worst-case descriptors + per-lcore cache + one burst */
477 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
478 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
481 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
485 if (socket_num == UMA_NO_CONFIG)
486 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
488 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
492 * Records which Mbuf pool to use by each logical core, if needed.
494 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
495 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
/* fall back to the socket-0 pool when the lcore's socket has none */
497 mbp = mbuf_pool_find(0);
498 fwd_lcores[lc_id]->mbp = mbp;
501 /* Configuration of Ethernet ports. */
502 ports = rte_zmalloc("testpmd: ports",
503 sizeof(struct rte_port) * nb_ports,
506 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
507 "failed\n", nb_ports);
510 for (pid = 0; pid < nb_ports; pid++) {
512 rte_eth_dev_info_get(pid, &port->dev_info);
515 if (port_numa[pid] != NUMA_NO_CONFIG)
516 port_per_socket[port_numa[pid]]++;
518 uint32_t socket_id = rte_eth_dev_socket_id(pid);
519 port_per_socket[socket_id]++;
523 /* set flag to initialize port/queue */
524 port->need_reconfig = 1;
525 port->need_reconfig_queues = 1;
530 unsigned int nb_mbuf;
532 if (param_total_num_mbufs)
533 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
535 for (i = 0; i < MAX_SOCKET; i++) {
536 nb_mbuf = (nb_mbuf_per_pool *
539 mbuf_pool_create(mbuf_data_size,
544 /* Configuration of packet forwarding streams. */
545 if (init_fwd_streams() < 0)
546 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * (Re)allocate the forwarding-stream array: one stream per RX queue of
 * every port (nb_ports * nb_rxq).  Validates nb_rxq/nb_txq against each
 * device's capabilities and records each port's NUMA socket.  Frees and
 * reallocates the streams only when the required count changed.
 */
550 init_fwd_streams(void)
553 struct rte_port *port;
554 streamid_t sm_id, nb_fwd_streams_new;
556 /* set socket id according to numa or not */
557 for (pid = 0; pid < nb_ports; pid++) {
559 if (nb_rxq > port->dev_info.max_rx_queues) {
560 printf("Fail: nb_rxq(%d) is greater than "
561 "max_rx_queues(%d)\n", nb_rxq,
562 port->dev_info.max_rx_queues);
565 if (nb_txq > port->dev_info.max_tx_queues) {
566 printf("Fail: nb_txq(%d) is greater than "
567 "max_tx_queues(%d)\n", nb_txq,
568 port->dev_info.max_tx_queues);
572 port->socket_id = rte_eth_dev_socket_id(pid);
574 if (socket_num == UMA_NO_CONFIG)
577 port->socket_id = socket_num;
581 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
/* nothing to do when the stream count is unchanged */
582 if (nb_fwd_streams_new == nb_fwd_streams)
585 if (fwd_streams != NULL) {
586 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
587 if (fwd_streams[sm_id] == NULL)
589 rte_free(fwd_streams[sm_id]);
590 fwd_streams[sm_id] = NULL;
592 rte_free(fwd_streams);
597 nb_fwd_streams = nb_fwd_streams_new;
598 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
599 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
600 if (fwd_streams == NULL)
601 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
602 "failed\n", nb_fwd_streams);
604 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
605 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
606 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
607 if (fwd_streams[sm_id] == NULL)
608 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
615 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a one-line summary of the burst-size distribution recorded in
 * *pbs: the total number of bursts and the percentage taken by the
 * two most frequent burst sizes.  rx_tx is the "RX"/"TX" label.
 */
617 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
619 unsigned int total_burst;
620 unsigned int nb_burst;
621 unsigned int burst_stats[3];
622 uint16_t pktnb_stats[3];
624 int burst_percent[3];
627 * First compute the total number of packet bursts and the
628 * two highest numbers of bursts of the same number of packets.
631 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
632 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
633 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
634 nb_burst = pbs->pkt_burst_spread[nb_pkt];
637 total_burst += nb_burst;
/* keep the two most frequent burst sizes, most frequent first */
638 if (nb_burst > burst_stats[0]) {
639 burst_stats[1] = burst_stats[0];
640 pktnb_stats[1] = pktnb_stats[0];
641 burst_stats[0] = nb_burst;
642 pktnb_stats[0] = nb_pkt;
/* nothing recorded: nothing to print */
645 if (total_burst == 0)
647 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
648 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
649 burst_percent[0], (int) pktnb_stats[0]);
650 if (burst_stats[0] == total_burst) {
654 if (burst_stats[0] + burst_stats[1] == total_burst) {
655 printf(" + %d%% of %d pkts]\n",
656 100 - burst_percent[0], pktnb_stats[1]);
659 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
660 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
661 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
662 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
665 printf(" + %d%% of %d pkts + %d%% of others]\n",
666 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
668 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print the forwarding statistics of one port: RX/TX packet counts,
 * drops, checksum errors (csum engine only), mbuf-allocation failures,
 * and — when queue-stats mapping is enabled — the per-stats-register
 * counters.  Two print layouts are used depending on whether any
 * queue-stats mapping is enabled for the port.
 */
671 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
673 struct rte_port *port;
676 static const char *fwd_stats_border = "----------------------";
678 port = &ports[port_id];
679 printf("\n %s Forward statistics for port %-2d %s\n",
680 fwd_stats_border, port_id, fwd_stats_border);
/* compact layout when no queue-stats mapping is active */
682 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
683 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
685 stats->ipackets, stats->ierrors,
686 (uint64_t) (stats->ipackets + stats->ierrors));
688 if (cur_fwd_eng == &csum_fwd_engine)
689 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
690 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
692 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
694 stats->opackets, port->tx_dropped,
695 (uint64_t) (stats->opackets + port->tx_dropped));
697 if (stats->rx_nombuf > 0)
698 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
/* wide layout when queue-stats mapping is active */
702 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
704 stats->ipackets, stats->ierrors,
705 (uint64_t) (stats->ipackets + stats->ierrors));
707 if (cur_fwd_eng == &csum_fwd_engine)
708 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
709 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
711 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
713 stats->opackets, port->tx_dropped,
714 (uint64_t) (stats->opackets + port->tx_dropped));
716 if (stats->rx_nombuf > 0)
717 printf(" RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
719 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
721 pkt_burst_stats_display("RX",
722 &port->rx_stream->rx_burst_stats);
724 pkt_burst_stats_display("TX",
725 &port->tx_stream->tx_burst_stats);
728 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
729 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
733 if (port->rx_queue_stats_mapping_enabled) {
735 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
736 printf(" Stats reg %2d RX-packets:%14"PRIu64
737 " RX-errors:%14"PRIu64
738 " RX-bytes:%14"PRIu64"\n",
739 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
743 if (port->tx_queue_stats_mapping_enabled) {
744 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
745 printf(" Stats reg %2d TX-packets:%14"PRIu64
746 " TX-bytes:%14"PRIu64"\n",
747 i, stats->q_opackets[i], stats->q_obytes[i]);
751 printf(" %s--------------------------------%s\n",
752 fwd_stats_border, fwd_stats_border);
/*
 * Print the statistics of one forwarding stream (RX port/queue ->
 * TX port/queue): packet counts, drops and — for the csum engine —
 * bad checksum counters.  Streams with no activity are skipped.
 */
756 fwd_stream_stats_display(streamid_t stream_id)
758 struct fwd_stream *fs;
759 static const char *fwd_top_stats_border = "-------";
761 fs = fwd_streams[stream_id];
/* skip completely idle streams */
762 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
763 (fs->fwd_dropped == 0))
765 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
766 "TX Port=%2d/Queue=%2d %s\n",
767 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
768 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
769 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
770 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
772 /* if checksum mode */
773 if (cur_fwd_eng == &csum_fwd_engine) {
774 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
775 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
778 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
779 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
780 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain any packets pending in the RX queues of all forwarding ports
 * before starting a forwarding run, so that stale packets do not
 * pollute the new run's statistics.  Two passes are made, freeing every
 * received mbuf.
 */
785 flush_fwd_rx_queues(void)
787 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
795 for (j = 0; j < 2; j++) {
796 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
797 for (rxq = 0; rxq < nb_rxq; rxq++) {
798 port_id = fwd_ports_ids[rxp];
800 nb_rx = rte_eth_rx_burst(port_id, rxq,
801 pkts_burst, MAX_PKT_BURST);
/* immediately free the drained packets */
802 for (i = 0; i < nb_rx; i++)
803 rte_pktmbuf_free(pkts_burst[i]);
807 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Main per-lcore forwarding loop: repeatedly run the engine's packet
 * forwarding function on every stream assigned to this lcore, until the
 * lcore's "stopped" flag is raised by stop_packet_forwarding().
 */
812 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
814 struct fwd_stream **fsm;
818 fsm = &fwd_streams[fc->stream_idx];
819 nb_fs = fc->stream_nb;
821 for (sm_id = 0; sm_id < nb_fs; sm_id++)
822 (*pkt_fwd)(fsm[sm_id]);
823 } while (! fc->stopped);
/* lcore entry point: run the current engine's forwarding loop on this core. */
827 start_pkt_forward_on_core(void *fwd_arg)
829 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
830 cur_fwd_config.fwd_eng->packet_fwd);
835 * Run the TXONLY packet forwarding engine to send a single burst of packets.
836 * Used to start communication flows in network loopback test configurations.
839 run_one_txonly_burst_on_core(void *fwd_arg)
841 struct fwd_lcore *fwd_lc;
842 struct fwd_lcore tmp_lcore;
844 fwd_lc = (struct fwd_lcore *) fwd_arg;
/* a local copy with stopped=1 makes the forwarding loop run exactly once */
846 tmp_lcore.stopped = 1;
847 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
852 * Launch packet forwarding:
853 * - Setup per-port forwarding context.
854 * - launch logical cores with their forwarding configuration.
857 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
859 port_fwd_begin_t port_fwd_begin;
/* give the engine a chance to initialize each forwarding port */
864 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
865 if (port_fwd_begin != NULL) {
866 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
867 (*port_fwd_begin)(fwd_ports_ids[i]);
869 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
870 lc_id = fwd_lcores_cpuids[i];
/* in interactive mode the current (master) lcore keeps the CLI */
871 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
872 fwd_lcores[i]->stopped = 0;
873 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
874 fwd_lcores[i], lc_id);
876 printf("launch lcore %u failed - diag=%d\n",
883 * Launch packet forwarding configuration.
/* with_tx_first: when non-zero, first run the txonly engine for one burst
 * on each core to prime loopback test flows, then start the configured
 * forwarding engine. */
886 start_packet_forwarding(int with_tx_first)
888 port_fwd_begin_t port_fwd_begin;
889 port_fwd_end_t port_fwd_end;
890 struct rte_port *port;
895 if (all_ports_started() == 0) {
896 printf("Not all ports were started\n");
899 if (test_done == 0) {
900 printf("Packet forwarding already started\n");
/* in DCB mode, every forwarding port must be DCB-configured and more
 * than one forwarding core is required */
904 for (i = 0; i < nb_fwd_ports; i++) {
905 pt_id = fwd_ports_ids[i];
906 port = &ports[pt_id];
907 if (!port->dcb_flag) {
908 printf("In DCB mode, all forwarding ports must "
909 "be configured in this mode.\n");
913 if (nb_fwd_lcores == 1) {
914 printf("In DCB mode,the nb forwarding cores "
915 "should be larger than 1.\n");
922 flush_fwd_rx_queues();
925 rxtx_config_display();
/* snapshot current HW stats so the run's counters start from zero */
927 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
928 pt_id = fwd_ports_ids[i];
929 port = &ports[pt_id];
930 rte_eth_stats_get(pt_id, &port->stats);
931 port->tx_dropped = 0;
933 map_port_queue_stats_mapping_registers(pt_id, port);
/* reset all per-stream software counters */
935 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
936 fwd_streams[sm_id]->rx_packets = 0;
937 fwd_streams[sm_id]->tx_packets = 0;
938 fwd_streams[sm_id]->fwd_dropped = 0;
939 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
940 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
942 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
943 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
944 sizeof(fwd_streams[sm_id]->rx_burst_stats));
945 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
946 sizeof(fwd_streams[sm_id]->tx_burst_stats));
948 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
949 fwd_streams[sm_id]->core_cycles = 0;
/* optional priming pass with the txonly engine */
953 port_fwd_begin = tx_only_engine.port_fwd_begin;
954 if (port_fwd_begin != NULL) {
955 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
956 (*port_fwd_begin)(fwd_ports_ids[i]);
958 launch_packet_forwarding(run_one_txonly_burst_on_core);
959 rte_eal_mp_wait_lcore();
960 port_fwd_end = tx_only_engine.port_fwd_end;
961 if (port_fwd_end != NULL) {
962 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
963 (*port_fwd_end)(fwd_ports_ids[i]);
/* launch the configured forwarding engine on all forwarding lcores */
966 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop packet forwarding: raise every forwarding lcore's "stopped" flag,
 * wait for the lcores to finish, run the engine's per-port end callback,
 * aggregate the per-stream software counters into the per-port counters,
 * then print per-port and accumulated statistics.
 *
 * FIX(review): in the per-port stats-reset loop below, the lines after
 * "stats.fdirmatch -= ..." and "stats.fdirmiss -= ..." reset
 * port->stats.rx_nombuf instead of port->stats.fdirmatch /
 * port->stats.fdirmiss (copy-paste bug), so the FDIR baseline counters
 * were never cleared and accumulated across runs.  Fixed to reset the
 * matching fields.
 */
970 stop_packet_forwarding(void)
972 struct rte_eth_stats stats;
973 struct rte_port *port;
974 port_fwd_end_t port_fwd_end;
981 uint64_t total_rx_dropped;
982 uint64_t total_tx_dropped;
983 uint64_t total_rx_nombuf;
985 uint64_t rx_bad_ip_csum;
986 uint64_t rx_bad_l4_csum;
987 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
990 static const char *acc_stats_border = "+++++++++++++++";
992 if (all_ports_started() == 0) {
993 printf("Not all ports were started\n");
997 printf("Packet forwarding not started\n");
1000 printf("Telling cores to stop...");
1001 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1002 fwd_lcores[lc_id]->stopped = 1;
1003 printf("\nWaiting for lcores to finish...\n");
1004 rte_eal_mp_wait_lcore();
1005 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1006 if (port_fwd_end != NULL) {
1007 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1008 pt_id = fwd_ports_ids[i];
1009 (*port_fwd_end)(pt_id);
1012 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* fold each stream's counters into its RX/TX port counters */
1015 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1016 if (cur_fwd_config.nb_fwd_streams >
1017 cur_fwd_config.nb_fwd_ports) {
1018 fwd_stream_stats_display(sm_id);
1019 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1020 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1022 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1024 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1027 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1028 tx_dropped = (uint64_t) (tx_dropped +
1029 fwd_streams[sm_id]->fwd_dropped);
1030 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1033 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1034 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1035 fwd_streams[sm_id]->rx_bad_ip_csum);
1036 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1040 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1041 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1042 fwd_streams[sm_id]->rx_bad_l4_csum);
1043 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1046 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1047 fwd_cycles = (uint64_t) (fwd_cycles +
1048 fwd_streams[sm_id]->core_cycles);
1053 total_rx_dropped = 0;
1054 total_tx_dropped = 0;
1055 total_rx_nombuf = 0;
/* subtract the baseline recorded at start_packet_forwarding() so the
 * displayed counters cover only this run, and clear each baseline */
1056 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1057 pt_id = fwd_ports_ids[i];
1059 port = &ports[pt_id];
1060 rte_eth_stats_get(pt_id, &stats);
1061 stats.ipackets -= port->stats.ipackets;
1062 port->stats.ipackets = 0;
1063 stats.opackets -= port->stats.opackets;
1064 port->stats.opackets = 0;
1065 stats.ibytes -= port->stats.ibytes;
1066 port->stats.ibytes = 0;
1067 stats.obytes -= port->stats.obytes;
1068 port->stats.obytes = 0;
1069 stats.ierrors -= port->stats.ierrors;
1070 port->stats.ierrors = 0;
1071 stats.oerrors -= port->stats.oerrors;
1072 port->stats.oerrors = 0;
1073 stats.rx_nombuf -= port->stats.rx_nombuf;
1074 port->stats.rx_nombuf = 0;
1075 stats.fdirmatch -= port->stats.fdirmatch;
1076 port->stats.fdirmatch = 0;
1077 stats.fdirmiss -= port->stats.fdirmiss;
1078 port->stats.fdirmiss = 0;
1080 total_recv += stats.ipackets;
1081 total_xmit += stats.opackets;
1082 total_rx_dropped += stats.ierrors;
1083 total_tx_dropped += port->tx_dropped;
1084 total_rx_nombuf += stats.rx_nombuf;
1086 fwd_port_stats_display(pt_id, &stats);
1088 printf("\n %s Accumulated forward statistics for all ports"
1090 acc_stats_border, acc_stats_border);
1091 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1093 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1095 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1096 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1097 if (total_rx_nombuf > 0)
1098 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1099 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1101 acc_stats_border, acc_stats_border);
1102 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1104 printf("\n CPU cycles/packet=%u (total cycles="
1105 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1106 (unsigned int)(fwd_cycles / total_recv),
1107 fwd_cycles, total_recv);
1109 printf("\nDone.\n");
/*
 * Return a positive value when every probed port is in the
 * RTE_PORT_STARTED state, zero otherwise.
 */
1114 all_ports_started(void)
1117 struct rte_port *port;
1119 for (pi = 0; pi < nb_ports; pi++) {
1121 /* Check if there is a port which is not started */
1122 if (port->port_status != RTE_PORT_STARTED)
1126 /* No port is not started */
/*
 * Start the given port (or, when pid is out of range, every port):
 * (re)configure the device and its RX/TX queues if flagged for
 * reconfiguration, start the device, and transition port_status from
 * STOPPED -> HANDLING -> STARTED using atomic compare-and-set so that
 * concurrent state changes are detected.  On any failure the status is
 * set back to STOPPED and the reconfig flags are re-armed for a retry.
 */
1131 start_port(portid_t pid)
1133 int diag, need_check_link_status = 0;
1136 struct rte_port *port;
1138 if (test_done == 0) {
1139 printf("Please stop forwarding first\n");
1143 if (init_fwd_streams() < 0) {
1144 printf("Fail from init_fwd_streams()\n");
1150 for (pi = 0; pi < nb_ports; pi++) {
/* pid < nb_ports selects one port; otherwise all ports are started */
1151 if (pid < nb_ports && pid != pi)
1155 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1156 RTE_PORT_HANDLING) == 0) {
1157 printf("Port %d is now not stopped\n", pi);
1161 if (port->need_reconfig > 0) {
1162 port->need_reconfig = 0;
1164 printf("Configuring Port %d (socket %d)\n", pi,
1165 rte_eth_dev_socket_id(pi));
1166 /* configure port */
1167 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1170 if (rte_atomic16_cmpset(&(port->port_status),
1171 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1172 printf("Port %d can not be set back "
1173 "to stopped\n", pi);
1174 printf("Fail to configure port %d\n", pi);
1175 /* try to reconfigure port next time */
1176 port->need_reconfig = 1;
1180 if (port->need_reconfig_queues > 0) {
1181 port->need_reconfig_queues = 0;
1182 /* setup tx queues */
1183 for (qi = 0; qi < nb_txq; qi++) {
/* honor a per-port TX ring NUMA override when configured */
1184 if ((numa_support) &&
1185 (txring_numa[pi] != NUMA_NO_CONFIG))
1186 diag = rte_eth_tx_queue_setup(pi, qi,
1187 nb_txd,txring_numa[pi],
1190 diag = rte_eth_tx_queue_setup(pi, qi,
1191 nb_txd,port->socket_id,
1197 /* Fail to setup tx queue, return */
1198 if (rte_atomic16_cmpset(&(port->port_status),
1200 RTE_PORT_STOPPED) == 0)
1201 printf("Port %d can not be set back "
1202 "to stopped\n", pi);
1203 printf("Fail to configure port %d tx queues\n", pi);
1204 /* try to reconfigure queues next time */
1205 port->need_reconfig_queues = 1;
1208 /* setup rx queues */
1209 for (qi = 0; qi < nb_rxq; qi++) {
/* honor a per-port RX ring NUMA override when configured */
1210 if ((numa_support) &&
1211 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1212 struct rte_mempool * mp =
1213 mbuf_pool_find(rxring_numa[pi]);
1215 printf("Failed to setup RX queue:"
1216 "No mempool allocation"
1217 "on the socket %d\n",
1222 diag = rte_eth_rx_queue_setup(pi, qi,
1223 nb_rxd,rxring_numa[pi],
1224 &(port->rx_conf),mp);
1227 diag = rte_eth_rx_queue_setup(pi, qi,
1228 nb_rxd,port->socket_id,
1230 mbuf_pool_find(port->socket_id));
1236 /* Fail to setup rx queue, return */
1237 if (rte_atomic16_cmpset(&(port->port_status),
1239 RTE_PORT_STOPPED) == 0)
1240 printf("Port %d can not be set back "
1241 "to stopped\n", pi);
1242 printf("Fail to configure port %d rx queues\n", pi);
1243 /* try to reconfigure queues next time */
1244 port->need_reconfig_queues = 1;
1249 if (rte_eth_dev_start(pi) < 0) {
1250 printf("Fail to start port %d\n", pi);
1252 /* failed to start the device: set the port back to stopped */
1253 if (rte_atomic16_cmpset(&(port->port_status),
1254 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1255 printf("Port %d can not be set back to "
1260 if (rte_atomic16_cmpset(&(port->port_status),
1261 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1262 printf("Port %d can not be set into started\n", pi);
1264 /* at least one port started, need checking link status */
1265 need_check_link_status = 1;
1268 if (need_check_link_status)
1269 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1271 printf("Please stop the ports first\n");
/*
 * stop_port() - stop one Ethernet port, or all ports when a wildcard port
 * id is supplied (the loop below only skips a port when pid names a
 * specific valid port different from pi).
 *
 * NOTE(review): this file is a fragmented excerpt -- the embedded original
 * line numbers (1277, 1280, 1281, ...) are non-contiguous, so statements
 * such as the early return after the "stop forwarding" check, the port
 * lookup (presumably port = &ports[pi]; -- confirm against the full file)
 * and several closing braces are missing here. Comments describe only what
 * the visible lines establish.
 */
1277 stop_port(portid_t pid)
1280 struct rte_port *port;
1281 int need_check_link_status = 0;
/* Refuse to stop ports while a forwarding test is still running. */
1283 if (test_done == 0) {
1284 printf("Please stop forwarding first\n");
1291 printf("Stopping ports...\n");
1293 for (pi = 0; pi < nb_ports; pi++) {
/* pid < nb_ports means a single specific port was requested; skip others. */
1294 if (pid < nb_ports && pid != pi)
/*
 * Atomically claim the port (STARTED -> HANDLING) so a concurrent state
 * change is detected; on cmpset failure the port is evidently skipped.
 */
1298 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1299 RTE_PORT_HANDLING) == 0)
1302 rte_eth_dev_stop(pi);
/* Release the port: HANDLING -> STOPPED. */
1304 if (rte_atomic16_cmpset(&(port->port_status),
1305 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1306 printf("Port %d can not be set into stopped\n", pi);
1307 need_check_link_status = 1;
/* Report link state once at least one port was actually stopped. */
1309 if (need_check_link_status)
1310 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
/*
 * close_port() - close (release) one Ethernet port, or all ports for a
 * wildcard pid, after verifying forwarding is stopped.
 *
 * NOTE(review): fragmented excerpt -- the embedded line numbers jump
 * (1316, 1319, 1321, ...), so the early return, the port lookup and the
 * loop's closing braces are among the missing lines.
 */
1316 close_port(portid_t pid)
1319 struct rte_port *port;
/* Closing while forwarding runs is rejected. */
1321 if (test_done == 0) {
1322 printf("Please stop forwarding first\n");
1326 printf("Closing ports...\n");
1328 for (pi = 0; pi < nb_ports; pi++) {
/* Skip ports other than the one requested (when pid is specific). */
1329 if (pid < nb_ports && pid != pi)
/* Claim the port: only a STOPPED port may be closed. */
1333 if (rte_atomic16_cmpset(&(port->port_status),
1334 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1335 printf("Port %d is now not stopped\n", pi);
1339 rte_eth_dev_close(pi);
/* Mark the port CLOSED. */
1341 if (rte_atomic16_cmpset(&(port->port_status),
1342 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
/* NOTE(review): message says "stopped" but the target state above is
 * RTE_PORT_CLOSED -- looks like a copy/paste slip; cannot be changed in a
 * comment-only edit. */
1343 printf("Port %d can not be set into stopped\n", pi);
/*
 * all_ports_stopped() - predicate scanning every port's status against
 * RTE_PORT_STOPPED.
 *
 * NOTE(review): fragmented excerpt -- the return statements (presumably 0
 * when any port is not stopped, 1 otherwise; confirm against the full
 * file) and the port lookup are among the missing lines.
 */
1350 all_ports_stopped(void)
1353 struct rte_port *port;
1355 for (pi = 0; pi < nb_ports; pi++) {
1357 if (port->port_status != RTE_PORT_STOPPED)
/*
 * Fragment of a shutdown routine: iterates all probed ports, announcing
 * and closing each one via rte_eth_dev_close().
 *
 * NOTE(review): the enclosing function's header is missing from this
 * excerpt -- presumably pmd_test_exit(); verify against the full file.
 * The rte_eth_dev_stop() call implied by the "Stopping port" message is
 * also among the missing lines.
 */
1369 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1370 printf("Stopping port %d...", pt_id);
1372 rte_eth_dev_close(pt_id);
/*
 * Command-dispatch plumbing: a zero-argument command handler type and a
 * name -> handler table entry.
 *
 * NOTE(review): fragmented excerpt -- the struct's closing brace and the
 * pmd_test_menu[] array that PMD_TEST_CMD_NB measures are defined in the
 * missing lines (1382-1383).
 */
1378 typedef void (*cmd_func_t)(void);
1379 struct pmd_test_command {
1380 const char *cmd_name;  /* command keyword matched against input */
1381 cmd_func_t cmd_func;   /* handler invoked for that keyword */
/* Element count of pmd_test_menu[] (classic sizeof-array idiom). */
1384 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1386 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * Polls rte_eth_link_get_nowait() on every port selected by port_mask,
 * for at most MAX_CHECK_TIME rounds of CHECK_INTERVAL ms (90 * 100ms =
 * the 9s advertised above), then prints the final per-port link state.
 *
 * NOTE(review): fragmented excerpt -- the return type line, the
 * all_ports_up initialisation, several continue/break statements and
 * closing braces fall in the missing embedded line numbers.
 */
1388 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1390 #define CHECK_INTERVAL 100 /* 100ms */
1391 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1392 uint8_t portid, count, all_ports_up, print_flag = 0;
1393 struct rte_eth_link link;
1395 printf("Checking link statuses...\n")
1397 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1399 for (portid = 0; portid < port_num; portid++) {
/* Skip ports excluded from the caller's mask. */
1400 if ((port_mask & (1 << portid)) == 0)
1402 memset(&link, 0, sizeof(link));
/* Non-blocking query -- does not wait for auto-negotiation. */
1403 rte_eth_link_get_nowait(portid, &link);
1404 /* print link status if flag set */
1405 if (print_flag == 1) {
1406 if (link.link_status)
1407 printf("Port %d Link Up - speed %u "
1408 "Mbps - %s\n", (uint8_t)portid,
1409 (unsigned)link.link_speed,
1410 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* NOTE(review): stray "\n" inside the half-duplex arm produces a double
 * newline (the format string already ends in \n); cannot be fixed in a
 * comment-only edit. */
1411 ("full-duplex") : ("half-duplex\n"));
1413 printf("Port %d Link Down\n",
1417 /* clear all_ports_up flag if any link down */
1418 if (link.link_status == 0) {
1423 /* after finally printing all link status, get out */
1424 if (print_flag == 1)
/* Not all ports up yet: sleep one interval and poll again. */
1427 if (all_ports_up == 0) {
1429 rte_delay_ms(CHECK_INTERVAL);
1432 /* set the print_flag if all ports up or timeout */
1433 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * set_tx_queue_stats_mapping_registers() - program the NIC's per-queue TX
 * stats counters from the global tx_queue_stats_mappings[] table, applying
 * only entries that match this port_id and reference a configured TX queue.
 *
 * NOTE(review): fragmented excerpt -- the error check on diag, the
 * mapping_found update and the return statements (lines 1452-1461) are
 * missing; presumably diag is propagated to the caller on failure.
 */
1440 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1444 uint8_t mapping_found = 0;
1446 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1447 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1448 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1449 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1450 tx_queue_stats_mappings[i].queue_id,
1451 tx_queue_stats_mappings[i].stats_counter_id);
/* Record on the port that at least one TX mapping is active. */
1458 port->tx_queue_stats_mapping_enabled = 1;
/*
 * set_rx_queue_stats_mapping_registers() - RX counterpart of the TX
 * routine above: program per-queue RX stats counters from the global
 * rx_queue_stats_mappings[] table for entries matching this port_id with
 * a valid (configured) RX queue id.
 *
 * NOTE(review): fragmented excerpt -- diag error handling, mapping_found
 * update and return statements (lines 1475-1484) are missing.
 */
1463 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1467 uint8_t mapping_found = 0;
1469 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1470 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1471 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1472 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1473 rx_queue_stats_mappings[i].queue_id,
1474 rx_queue_stats_mappings[i].stats_counter_id);
/* Record on the port that at least one RX mapping is active. */
1481 port->rx_queue_stats_mapping_enabled = 1;
/*
 * map_port_queue_stats_mapping_registers() - apply both TX and RX queue
 * stats mappings to port pi. -ENOTSUP from the driver is tolerated (the
 * feature is simply disabled on the port with a notice); any other error
 * visible here aborts the application via rte_exit().
 *
 * NOTE(review): fragmented excerpt -- the "else if (diag != 0)" style
 * guards in front of each rte_exit() call and the diag arguments of the
 * format strings fall in the missing embedded line numbers.
 */
1486 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1490 diag = set_tx_queue_stats_mapping_registers(pi, port);
/* Driver lacks TX mapping support: disable, do not fail. */
1492 if (diag == -ENOTSUP) {
1493 port->tx_queue_stats_mapping_enabled = 0;
1494 printf("TX queue stats mapping not supported port id=%d\n", pi);
/* Any other TX mapping failure is fatal. */
1497 rte_exit(EXIT_FAILURE,
1498 "set_tx_queue_stats_mapping_registers "
1499 "failed for port id=%d diag=%d\n",
1503 diag = set_rx_queue_stats_mapping_registers(pi, port);
/* Driver lacks RX mapping support: disable, do not fail. */
1505 if (diag == -ENOTSUP) {
1506 port->rx_queue_stats_mapping_enabled = 0;
1507 printf("RX queue stats mapping not supported port id=%d\n", pi);
/* Any other RX mapping failure is fatal. */
1510 rte_exit(EXIT_FAILURE,
1511 "set_rx_queue_stats_mapping_registers "
1512 "failed for port id=%d diag=%d\n",
/*
 * init_port_config() - fill every port's device/queue configuration from
 * the global testpmd defaults (rx_mode, fdir_conf, thresholds, txq_flags),
 * fetch its MAC address and program its queue-stats mapping registers.
 *
 * NOTE(review): fragmented excerpt -- the port lookup and the condition
 * selecting between the two rss_conf assignment pairs below (lines
 * 1527/1530, presumably keyed on nb_rxq / RSS enablement -- confirm
 * against the full file) are missing.
 */
1518 init_port_config(void)
1521 struct rte_port *port;
1523 for (pid = 0; pid < nb_ports; pid++) {
1525 port->dev_conf.rxmode = rx_mode;
1526 port->dev_conf.fdir_conf = fdir_conf;
/* Branch A (guard missing from excerpt): RSS enabled with default key. */
1528 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1529 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
/* Branch B (guard missing from excerpt): RSS disabled (rss_hf = 0). */
1531 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1532 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* Per-queue RX/TX tuning from the command-line-settable globals. */
1534 port->rx_conf.rx_thresh = rx_thresh;
1535 port->rx_conf.rx_free_thresh = rx_free_thresh;
1536 port->rx_conf.rx_drop_en = rx_drop_en;
1537 port->tx_conf.tx_thresh = tx_thresh;
1538 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1539 port->tx_conf.tx_free_thresh = tx_free_thresh;
1540 port->tx_conf.txq_flags = txq_flags;
1542 rte_eth_macaddr_get(pid, &port->eth_addr);
1544 map_port_queue_stats_mapping_registers(pid, port);
/*
 * VLAN tags 0..31 used by the DCB+VT (VMDQ+DCB) configuration below to
 * populate the pool map (one entry per tag, see get_eth_dcb_conf()).
 * NOTE(review): the array's closing "};" (line 1553) is missing from this
 * excerpt.
 */
1548 const uint16_t vlan_tags[] = {
1549 0, 1, 2, 3, 4, 5, 6, 7,
1550 8, 9, 10, 11, 12, 13, 14, 15,
1551 16, 17, 18, 19, 20, 21, 22, 23,
1552 24, 25, 26, 27, 28, 29, 30, 31
/*
 * get_eth_dcb_conf() - build the rte_eth_conf for DCB operation.
 * Two visible branches: DCB with virtualization (VMDQ+DCB) when
 * dcb_conf->dcb_mode == DCB_VT_ENABLED, otherwise plain DCB. Both map
 * every user priority 0..ETH_DCB_NUM_USER_PRIORITIES-1 to queue/TC i and
 * enable PFC capability only when dcb_conf->pfc_en is set.
 *
 * NOTE(review): fragmented excerpt -- return statements, the else
 * keywords pairing branches, and closing braces fall in the missing
 * embedded line numbers. The "ð_conf" tokens below are mojibake: an
 * HTML-entity corruption of "&eth_conf" (&eth -> U+00F0); left untouched
 * in this comment-only edit.
 */
1556 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1561 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1562 * given above, and the number of traffic classes available for use.
1564 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1565 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1566 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1568 /* VMDQ+DCB RX and TX configrations */
1569 vmdq_rx_conf.enable_default_pool = 0;
1570 vmdq_rx_conf.default_pool = 0;
/* 4 TCs -> 32 pools; otherwise (8 TCs) -> 16 pools, for both RX and TX. */
1571 vmdq_rx_conf.nb_queue_pools =
1572 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1573 vmdq_tx_conf.nb_queue_pools =
1574 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* One pool-map entry per vlan_tags[] element (32 entries). */
1576 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1577 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1578 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
/* Round-robin: tag i lands in pool (i mod nb_queue_pools). */
1579 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
/* Identity map of user priorities onto DCB queues. */
1581 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1582 vmdq_rx_conf.dcb_queue[i] = i;
1583 vmdq_tx_conf.dcb_queue[i] = i;
1586 /*set DCB mode of RX and TX of multiple queues*/
1587 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1588 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
/* Priority flow control is optional; priority groups always on. */
1589 if (dcb_conf->pfc_en)
1590 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1592 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1594 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1595 sizeof(struct rte_eth_vmdq_dcb_conf)));
1596 (void)(rte_memcpy(ð_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1597 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
/* Plain DCB (non-VT) branch -- the "else" keyword is among missing lines. */
1600 struct rte_eth_dcb_rx_conf rx_conf;
1601 struct rte_eth_dcb_tx_conf tx_conf;
1603 /* queue mapping configuration of DCB RX and TX */
1604 if (dcb_conf->num_tcs == ETH_4_TCS)
1605 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1607 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1609 rx_conf.nb_tcs = dcb_conf->num_tcs;
1610 tx_conf.nb_tcs = dcb_conf->num_tcs;
/* Identity map of user priorities onto DCB queues, as above. */
1612 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1613 rx_conf.dcb_queue[i] = i;
1614 tx_conf.dcb_queue[i] = i;
1616 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1617 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1618 if (dcb_conf->pfc_en)
1619 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1621 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1623 (void)(rte_memcpy(ð_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1624 sizeof(struct rte_eth_dcb_rx_conf)));
1625 (void)(rte_memcpy(ð_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1626 sizeof(struct rte_eth_dcb_tx_conf)));
/*
 * init_port_dcb_config() - configure one port for DCB mode: build the DCB
 * rte_eth_conf via get_eth_dcb_conf(), copy it into the port's stored
 * config, apply queue thresholds, enable HW VLAN filtering, register all
 * vlan_tags[] in the port's VLAN filter table, fetch the MAC address, set
 * up stats mappings and mark the port as DCB-enabled.
 *
 * NOTE(review): fragmented excerpt -- the retval error check after
 * get_eth_dcb_conf(), the nb_rxq/nb_txq DCB queue counts suggested by the
 * "rxq and txq configuration" comment, and the final return are among the
 * missing embedded lines.
 */
1633 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1635 struct rte_eth_conf port_conf;
1636 struct rte_port *rte_port;
1641 /* rxq and txq configuration in dcb mode */
1644 rx_free_thresh = 64;
1646 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1647 /* Enter DCB configuration status */
1650 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1651 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1652 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1656 rte_port = &ports[pid];
1657 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
/* Queue thresholds from the command-line-settable globals. */
1659 rte_port->rx_conf.rx_thresh = rx_thresh;
1660 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1661 rte_port->tx_conf.tx_thresh = tx_thresh;
1662 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1663 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
/* VLAN filtering must be on so the tag->pool mapping takes effect. */
1665 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1666 for (i = 0; i < nb_vlan; i++){
1667 rx_vft_set(pid, vlan_tags[i], 1);
1670 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1671 map_port_queue_stats_mapping_registers(pid, rte_port);
/* Remember DCB is active on this port. */
1673 rte_port->dcb_flag = 1;
1678 #ifdef RTE_EXEC_ENV_BAREMETAL
1683 main(int argc, char** argv)
1688 diag = rte_eal_init(argc, argv);
1690 rte_panic("Cannot init EAL\n");
1692 if (rte_pmd_init_all())
1693 rte_panic("Cannot init PMD\n");
1695 if (rte_eal_pci_probe())
1696 rte_panic("Cannot probe PCI\n");
1698 nb_ports = (portid_t) rte_eth_dev_count();
1700 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1702 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1703 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1704 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1705 "configuration file\n");
1707 set_def_fwd_config();
1709 rte_panic("Empty set of forwarding logical cores - check the "
1710 "core mask supplied in the command parameters\n");
1715 launch_args_parse(argc, argv);
1717 if (nb_rxq > nb_txq)
1718 printf("Warning: nb_rxq=%d enables RSS configuration, "
1719 "but nb_txq=%d will prevent to fully test it.\n",
1723 start_port(RTE_PORT_ALL);
1725 /* set all ports to promiscuous mode by default */
1726 for (port_id = 0; port_id < nb_ports; port_id++)
1727 rte_eth_promiscuous_enable(port_id);
1729 if (interactive == 1)
1735 printf("No commandline core given, start packet forwarding\n");
1736 start_packet_forwarding(0);
1737 printf("Press enter to exit\n");
1738 rc = read(0, &c, 1);