/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>
#include <errno.h>

#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic
 * generator ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
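/*
 * Example: with 2 probed ports and nb_rxq = 4, init_fwd_streams() below
 * allocates 2 * 4 = 8 forwarding streams, one per (port, RX queue) pair.
 */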
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */

uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
/* current configuration is in DCB or not; 0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/* DCB on and VT on mapping is default */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
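/*
 * Roughly speaking, the prefetch (pthresh), host (hthresh) and write-back
 * (wthresh) values control how eagerly the NIC fetches and writes back
 * ring descriptors: e.g. TX_WTHRESH = 0 requests immediate descriptor
 * write-back, while RX_WTHRESH = 4 lets the hardware batch RX write-backs
 * in groups of four descriptors.
 */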
struct rte_eth_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct rte_eth_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};
/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.flexbytes_offset = 0x6,
	.drop_queue = 127,
};
static volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};
static void
testpmd_mbuf_ctor(struct rte_mempool *mp,
		  void *opaque_arg,
		  void *raw_mbuf,
		  __attribute__((unused)) unsigned i)
{
	struct mbuf_ctor_arg *mb_ctor_arg;
	struct rte_mbuf *mb;

	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
	mb = (struct rte_mbuf *) raw_mbuf;

	mb->type         = RTE_MBUF_PKT;
	mb->pool         = mp;
	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
				       mb_ctor_arg->seg_buf_offset);
	mb->buf_len      = mb_ctor_arg->seg_buf_size;
	mb->ol_flags     = 0;
	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
	mb->pkt.nb_segs  = 1;
	mb->pkt.vlan_macip.data = 0;
	mb->pkt.hash.rss = 0;
}
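/*
 * Resulting mbuf layout (sketch):
 *
 *   +------------------+----------+---------------------------+
 *   | struct rte_mbuf  | HEADROOM |       data segment        |
 *   +------------------+----------+---------------------------+
 *   ^ mb               ^ buf_addr ^ pkt.data
 *
 * buf_addr sits seg_buf_offset bytes (the cache-line-rounded size of the
 * mbuf header) past the mbuf itself, and pkt.data leaves
 * RTE_PKTMBUF_HEADROOM bytes free for prepending protocol headers.
 */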
static void
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
		       void *opaque_arg)
{
	struct mbuf_pool_ctor_arg *mbp_ctor_arg;
	struct rte_pktmbuf_pool_private *mbp_priv;

	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		printf("%s(%s) private_data_size %d < %d\n",
		       __func__, mp->name, (int) mp->private_data_size,
		       (int) sizeof(struct rte_pktmbuf_pool_private));
		return;
	}
	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
	mbp_priv = (struct rte_pktmbuf_pool_private *)
		((char *)mp + sizeof(struct rte_mempool));
	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
	rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
			 "failed\n", socket_id);
	}
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
				   (nb_lcores * mb_mempool_cache) +
				   RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
	}

	if (numa_support) {
		nb_mbuf_per_pool /= 2;
		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 1);
	} else
		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
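	/*
	 * Worked example of the default sizing above: with 2 ports and
	 * 4 lcores, one pool must hold
	 * (RTE_TEST_RX_DESC_MAX + 4 * mb_mempool_cache +
	 *  RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST) * 2 mbufs, and NUMA mode
	 * halves that amount per socket.
	 */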
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
			    CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
			 "failed\n", nb_ports);
	}

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	init_port_config();

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support)
			port->socket_id = (pid < (nb_ports >> 1)) ? 0 : 1;
		else
			port->socket_id = 0;
	}

	nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old forwarding streams */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new forwarding streams */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
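/*
 * Example output of pkt_burst_stats_display() (illustrative):
 *
 *   RX-bursts : 1024 [75% of 32 pkts + 20% of 16 pkts + 5% of others]
 *
 * i.e. the total number of bursts followed by the two most frequent
 * burst sizes and the share of all remaining burst sizes.
 */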
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
	}
	else {
		printf("  RX-packets:%14"PRIu64" RX-dropped:%14"PRIu64
		       " RX-total:%14"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets:%14"PRIu64" TX-dropped:%14"PRIu64
		       " TX-total:%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss, stats->fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i],
			       stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX-bad-ip-csum: %-14u RX-bad-l4-csum: %-14u\n",
		       fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
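/*
 * Drain any packet left over in the RX queues from a previous run so it
 * is not counted in the statistics of the forwarding session about to
 * start.
 */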
static void
flush_all_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < nb_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				do {
					nb_rx = rte_eth_rx_burst(rxp, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if ((dcb_test) && (nb_fwd_lcores == 1)) {
		printf("In DCB mode, the number of forwarding cores "
		       "should be larger than 1.\n");
		return;
	}
	test_done = 0;
	flush_all_rx_queues();
	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
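	/*
	 * port->stats still holds the snapshot taken in
	 * start_packet_forwarding(); the loop below subtracts it from the
	 * live counters so only this forwarding session is reported.
	 */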
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < ((cur_fwd_config.nb_fwd_ports + 1) & ~0x1); i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.ierrors -= port->stats.ierrors;
		port->stats.ierrors = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;
		stats.fdirmatch -= port->stats.fdirmatch;
		port->stats.fdirmatch = 0;
		stats.fdirmiss -= port->stats.fdirmiss;
		port->stats.fdirmiss = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.ierrors;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
	       "RX-total: %-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
	       "TX-total: %-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if (port->port_status != RTE_PORT_STARTED)
			return 0;
	}

	/* All ports are started */
	return 1;
}
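/*
 * Start all stopped ports, or only the port `pid` if it is a valid port
 * identifier. Port state moves through STOPPED -> HANDLING -> STARTED via
 * atomic compare-and-set, so a concurrent command cannot operate on a
 * port that is mid-transition; on any failure the port is set back to
 * STOPPED.
 */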
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	if (dcb_config)
		dcb_test = 1;
	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d\n", pi);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING,
					RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}

		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;

			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				diag = rte_eth_tx_queue_setup(pi, qi, nb_txd,
					port->socket_id, &(port->tx_conf));
				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				diag = rte_eth_rx_queue_setup(pi, qi, nb_rxd,
					port->socket_id, &(port->rx_conf),
					mbuf_pool_find(port->socket_id));
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n",
				       pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* fail to start port, set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
				       "stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set into started\n", pi);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	printf("Stopping ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	printf("Closing ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set into closed\n", pi);
	}

	printf("Done\n");
}
static int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		if (port->port_status != RTE_PORT_STOPPED)
			return 0;
	}

	return 1;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
		printf("Stopping port %d...", pt_id);
		fflush(stdout);
		rte_eth_dev_close(pt_id);
		printf("done\n");
	}
	printf("bye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
					       "Mbps - %s\n", (uint8_t)portid,
					       (unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
					       (uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
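/*
 * Timing of the loop above: the link status is polled every
 * CHECK_INTERVAL (100 ms) for at most MAX_CHECK_TIME iterations (~9 s);
 * statuses are printed exactly once, either as soon as every masked port
 * reports link up or on the final iteration.
 */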
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}
static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}
static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
				 "set_tx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
				 "set_rx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}
		port->rx_conf.rx_thresh = rx_thresh;
		port->rx_conf.rx_free_thresh = rx_free_thresh;
		port->rx_conf.rx_drop_en = rx_drop_en;
		port->tx_conf.tx_thresh = tx_thresh;
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
		port->tx_conf.tx_free_thresh = tx_free_thresh;
		port->tx_conf.txq_flags = txq_flags;

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
	}
}
const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
	uint8_t i;

	/*
	 * Build up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags)/sizeof(vlan_tags[0]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf.pool_map[i].pools =
				1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_VMDQ_DCB_TX;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf,
				  &vmdq_rx_conf,
				  sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf,
				  &vmdq_tx_conf,
				  sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	}
	else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* queue mapping configuration of DCB RX and TX */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_DCB_RX;
		eth_conf->txmode.mq_mode = ETH_DCB_TX;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
				  sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
				  sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}
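/*
 * Mapping example: with dcb_conf->num_tcs == ETH_4_TCS the VT branch uses
 * 32 pools, so vlan_tags[i] lands in pool (i % 32); in both branches each
 * of the ETH_DCB_NUM_USER_PRIORITIES user priorities maps one-to-one onto
 * DCB queue i.
 */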
int
init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* rxq and txq configuration in dcb mode */
	nb_rxq = 128;
	nb_txq = 128;
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	nb_vlan = sizeof(vlan_tags)/sizeof(vlan_tags[0]);
	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rte_port->rx_conf.rx_thresh = rx_thresh;
	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
	rte_port->tx_conf.tx_thresh = tx_thresh;
	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;

	/* VLAN filter configuration */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	return 0;
}
#ifdef RTE_EXEC_ENV_BAREMETAL
#define main _main
#endif

int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (rte_pmd_init_all())
		rte_panic("Cannot init PMD\n");

	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No probed ethernet devices - check "
			 "that CONFIG_RTE_LIBRTE_IGB_PMD=y, "
			 "CONFIG_RTE_LIBRTE_EM_PMD=y and "
			 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y are set in your "
			 "configuration file\n");
	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully "
		       "tested.\n", nb_rxq, nb_txq);

	init_config();
	start_port(RTE_PORT_ALL);

	/* set all ports to promiscuous mode by default */
	for (port_id = 0; port_id < nb_ports; port_id++)
		rte_eth_promiscuous_enable(port_id);

	if (interactive == 1)
		prompt();
	else {
		char c;
		int rc;

		printf("No command line requested, starting packet "
		       "forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}