4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <sys/types.h>
45 #include <sys/queue.h>
52 #include <rte_common.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_tailq.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
71 #include <rte_interrupts.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
/*
 * NOTE(review): this listing embeds stale original line numbers at the start
 * of each line and is missing lines (comment delimiters, array elements);
 * confirm every block against upstream app/test-pmd/testpmd.c before editing.
 */
/* ---- Global testpmd run-time configuration state ---- */
79 uint16_t verbose_level = 0; /**< Silent by default. */
81 /* use master core for command line ? */
82 uint8_t interactive = 0;
85 * NUMA support configuration.
86 * When set, the NUMA support attempts to dispatch the allocation of the
87 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
88 * probed ports among the CPU sockets 0 and 1.
89 * Otherwise, all memory is allocated from CPU socket 0.
91 uint8_t numa_support = 0; /**< No numa support by default */
94 * Record the Ethernet address of peer target ports to which packets are
96 * Must be instantiated with the ethernet addresses of peer traffic generator
99 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
100 portid_t nb_peer_eth_addrs = 0;
103 * Probed Target Environment.
105 struct rte_port *ports; /**< For all probed ethernet ports. */
106 portid_t nb_ports; /**< Number of probed ethernet ports. */
107 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
108 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
111 * Test Forwarding Configuration.
112 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
113 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
115 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
116 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
117 portid_t nb_cfg_ports; /**< Number of configured ports. */
118 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
120 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
121 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
123 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
124 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
/* ---- Forwarding engines table and txonly/DCB/queue defaults ----
 * NOTE(review): fwd_engines[] is visibly truncated in this listing — only
 * the optional IEEE1588 entry survives; the full table (io/mac/rx/tx/csum
 * engines, NULL terminator) must be restored from upstream. */
127 * Forwarding engines.
129 struct fwd_engine * fwd_engines[] = {
135 #ifdef RTE_LIBRTE_IEEE1588
136 &ieee1588_fwd_engine,
141 struct fwd_config cur_fwd_config;
142 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
144 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
145 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
146 * specified on command-line. */
149 * Configuration of packet segments used by the "txonly" processing engine.
151 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
152 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
153 TXONLY_DEF_PACKET_LEN,
155 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
157 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
158 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
160 /* current configuration is in DCB or not,0 means it is not in DCB mode */
161 uint8_t dcb_config = 0;
163 /* Whether the dcb is in testing status */
164 uint8_t dcb_test = 0;
166 /* DCB on and VT on mapping is default */
167 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
170 * Configurable number of RX/TX queues.
172 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
173 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
176 * Configurable number of RX/TX ring descriptors.
178 #define RTE_TEST_RX_DESC_DEFAULT 128
179 #define RTE_TEST_TX_DESC_DEFAULT 512
180 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
181 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
/* ---- RX/TX ring threshold registers and per-queue tunables ----
 * Values are hardware prefetch/host/write-back thresholds passed to the PMD
 * via rte_eth_thresh; the closing "};" lines of both initializers are
 * missing from this listing. */
184 * Configurable values of RX and TX ring threshold registers.
186 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
187 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
188 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
190 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
191 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
192 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
194 struct rte_eth_thresh rx_thresh = {
195 .pthresh = RX_PTHRESH,
196 .hthresh = RX_HTHRESH,
197 .wthresh = RX_WTHRESH,
200 struct rte_eth_thresh tx_thresh = {
201 .pthresh = TX_PTHRESH,
202 .hthresh = TX_HTHRESH,
203 .wthresh = TX_WTHRESH,
207 * Configurable value of RX free threshold.
209 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
212 * Configurable value of RX drop enable.
214 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
217 * Configurable value of TX free threshold.
219 uint16_t tx_free_thresh = 0; /* Use default values. */
222 * Configurable value of TX RS bit threshold.
224 uint16_t tx_rs_thresh = 0; /* Use default values. */
227 * Configurable value of TX queue flags.
229 uint32_t txq_flags = 0; /* No flags set. */
232 * Receive Side Scaling (RSS) configuration.
234 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
237 * Port topology configuration
239 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
/* ---- Default device RX mode, flow-director config, per-queue stats
 * register mappings, and forward declarations ----
 * NOTE(review): closing "};" of rx_mode and fdir_conf initializers absent
 * from this listing. */
242 * Ethernet device configuration.
244 struct rte_eth_rxmode rx_mode = {
245 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
247 .header_split = 0, /**< Header Split disabled. */
248 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
249 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
250 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
251 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
252 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
253 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
256 struct rte_fdir_conf fdir_conf = {
257 .mode = RTE_FDIR_MODE_NONE,
258 .pballoc = RTE_FDIR_PBALLOC_64K,
259 .status = RTE_FDIR_REPORT_STATUS,
260 .flexbytes_offset = 0x6,
/* test_done == 1 means packet forwarding is stopped/idle. */
264 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
266 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
267 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
269 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
270 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
272 uint16_t nb_tx_queue_stats_mappings = 0;
273 uint16_t nb_rx_queue_stats_mappings = 0;
275 /* Forward function declarations */
276 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
277 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
280 * Check if all the ports are started.
281 * If yes, return positive value. If not, return zero.
283 static int all_ports_started(void);
/*
 * Build the default forwarding-lcore list: every enabled lcore except the
 * master becomes a forwarding core; nb_lcores/nb_cfg_lcores are set from
 * the count. Fragment: declarations of i/nb_lc, the "continue;" bodies of
 * both if-statements, and closing braces are missing from this listing.
 */
286 * Setup default configuration.
289 set_default_fwd_lcores_config(void)
295 for (i = 0; i < RTE_MAX_LCORE; i++) {
296 if (! rte_lcore_is_enabled(i))
298 if (i == rte_get_master_lcore())
300 fwd_lcores_cpuids[nb_lc++] = i;
302 nb_lcores = (lcoreid_t) nb_lc;
303 nb_cfg_lcores = nb_lcores;
/*
 * Fill the default peer MAC table: locally-administered OUI byte plus the
 * port index in the last octet (02:00:00:00:00:<i> pattern — TODO confirm
 * middle bytes are zeroed elsewhere, e.g. by static initialization).
 */
308 set_def_peer_eth_addrs(void)
312 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
313 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
314 peer_eth_addrs[i].addr_bytes[5] = i;
/*
 * Default port list: identity mapping (fwd port i == probed port i); all
 * probed ports participate in forwarding.
 */
319 set_default_fwd_ports_config(void)
323 for (pt_id = 0; pt_id < nb_ports; pt_id++)
324 fwd_ports_ids[pt_id] = pt_id;
326 nb_cfg_ports = nb_ports;
327 nb_fwd_ports = nb_ports;
/* Reset lcores, peer addresses and ports to their defaults in one call. */
331 set_def_fwd_config(void)
333 set_default_fwd_lcores_config();
334 set_def_peer_eth_addrs();
335 set_default_fwd_ports_config();
/*
 * Opaque-argument structs passed to the mempool object/pool constructors
 * below: per-mbuf data offset/size, and the pool-wide segment buffer size.
 * Closing "};" lines are missing from this listing.
 */
339 * Configuration initialisation done once at init time.
341 struct mbuf_ctor_arg {
342 uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
343 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
346 struct mbuf_pool_ctor_arg {
347 uint16_t seg_buf_size; /**< size of data segment in mbuf. */
/*
 * Per-object mempool constructor: initializes one rte_mbuf — buffer
 * virtual/physical addresses (offset past the mbuf header), buffer length,
 * packet-mbuf type, data pointer past the headroom, and zeroed metadata.
 * Fragment: parameters opaque_arg/raw_mbuf and several field assignments
 * (e.g. pool pointer, refcnt) are missing from this listing.
 */
351 testpmd_mbuf_ctor(struct rte_mempool *mp,
354 __attribute__((unused)) unsigned i)
356 struct mbuf_ctor_arg *mb_ctor_arg;
359 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
360 mb = (struct rte_mbuf *) raw_mbuf;
/* Data buffer lives in the same mempool object, after the mbuf header. */
363 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
364 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
365 mb_ctor_arg->seg_buf_offset);
366 mb->buf_len = mb_ctor_arg->seg_buf_size;
367 mb->type = RTE_MBUF_PKT;
369 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
371 mb->pkt.vlan_macip.data = 0;
372 mb->pkt.hash.rss = 0;
/*
 * Pool constructor: validates that the mempool private area is large enough
 * for rte_pktmbuf_pool_private, then records the data-room size there.
 * Fragment: the opaque_arg parameter and the early-return after the size
 * check are missing from this listing.
 */
376 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
379 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
380 struct rte_pktmbuf_pool_private *mbp_priv;
382 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
383 printf("%s(%s) private_data_size %d < %d\n",
384 __func__, mp->name, (int) mp->private_data_size,
385 (int) sizeof(struct rte_pktmbuf_pool_private));
387 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
/* Private area is laid out immediately after the rte_mempool header. */
389 mbp_priv = (struct rte_pktmbuf_pool_private *)
390 ((char *)mp + sizeof(struct rte_mempool));
391 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
/*
 * Create one mbuf mempool on the given socket, aborting on failure.
 * Object size = cache-line-rounded mbuf header + (headroom + seg size)
 * data room. Fragment: the mb_size declaration, the tail of the
 * rte_mempool_create() call (socket_id/flags args), and closing braces are
 * missing from this listing.
 */
395 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
396 unsigned int socket_id)
398 char pool_name[RTE_MEMPOOL_NAMESIZE];
399 struct rte_mempool *rte_mp;
400 struct mbuf_pool_ctor_arg mbp_ctor_arg;
401 struct mbuf_ctor_arg mb_ctor_arg;
404 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
406 mb_ctor_arg.seg_buf_offset =
407 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
408 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
409 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
410 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
411 rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
412 (unsigned) mb_mempool_cache,
413 sizeof(struct rte_pktmbuf_pool_private),
414 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
415 testpmd_mbuf_ctor, &mb_ctor_arg,
417 if (rte_mp == NULL) {
418 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
419 "failed\n", socket_id);
/*
 * Body of init_config() — the signature line is missing from this listing.
 * One-time initialization: allocate the per-lcore forwarding contexts,
 * size and create the mbuf pool(s) (one per socket when NUMA support is
 * on, otherwise a single pool on socket 0), bind each lcore to its pool,
 * allocate the ports array and flag every port for (re)configuration,
 * then build the forwarding streams.
 */
427 struct rte_port *port;
428 struct rte_mempool *mbp;
429 unsigned int nb_mbuf_per_pool;
432 /* Configuration of logical cores. */
433 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
434 sizeof(struct fwd_lcore *) * nb_lcores,
436 if (fwd_lcores == NULL) {
437 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
438 "failed\n", nb_lcores);
440 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
441 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
442 sizeof(struct fwd_lcore),
444 if (fwd_lcores[lc_id] == NULL) {
445 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
448 fwd_lcores[lc_id]->cpuid_idx = lc_id;
452 * Create pools of mbuf.
453 * If NUMA support is disabled, create a single pool of mbuf in
455 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
457 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
458 * nb_txd can be configured at run time.
460 if (param_total_num_mbufs)
461 nb_mbuf_per_pool = param_total_num_mbufs;
/* Worst-case sizing: max descriptors + per-lcore cache + one burst. */
463 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
464 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
465 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
/* NUMA branch: split the budget across two sockets (guard line missing). */
468 nb_mbuf_per_pool /= 2;
469 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
470 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 1);
472 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
476 * Records which Mbuf pool to use by each logical core, if needed.
478 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
479 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
/* Fallback to the socket-0 pool (NULL check line missing here). */
481 mbp = mbuf_pool_find(0);
482 fwd_lcores[lc_id]->mbp = mbp;
485 /* Configuration of Ethernet ports. */
486 ports = rte_zmalloc("testpmd: ports",
487 sizeof(struct rte_port) * nb_ports,
490 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
491 "failed\n", nb_ports);
494 for (pid = 0; pid < nb_ports; pid++) {
496 rte_eth_dev_info_get(pid, &port->dev_info);
498 /* set flag to initialize port/queue */
499 port->need_reconfig = 1;
500 port->need_reconfig_queues = 1;
505 /* Configuration of packet forwarding streams. */
506 if (init_fwd_streams() < 0)
507 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
/*
 * (Re)build the fwd_streams array: validate nb_rxq/nb_txq against each
 * port's device limits, assign each port a socket id (NUMA heuristic:
 * lower half of ports -> socket 0, upper half -> socket 1), then free any
 * previous streams and allocate nb_ports * nb_rxq fresh ones. Fragment:
 * "return -1" after the validation printfs and several braces/returns are
 * missing from this listing.
 */
511 init_fwd_streams(void)
514 struct rte_port *port;
515 streamid_t sm_id, nb_fwd_streams_new;
517 /* set socket id according to numa or not */
518 for (pid = 0; pid < nb_ports; pid++) {
520 if (nb_rxq > port->dev_info.max_rx_queues) {
521 printf("Fail: nb_rxq(%d) is greater than "
522 "max_rx_queues(%d)\n", nb_rxq,
523 port->dev_info.max_rx_queues);
526 if (nb_txq > port->dev_info.max_tx_queues) {
527 printf("Fail: nb_txq(%d) is greater than "
528 "max_tx_queues(%d)\n", nb_txq,
529 port->dev_info.max_tx_queues);
533 port->socket_id = (pid < (nb_ports >> 1)) ? 0 : 1;
/* Nothing to do if the stream count is unchanged. */
538 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
539 if (nb_fwd_streams_new == nb_fwd_streams)
542 if (fwd_streams != NULL) {
543 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
544 if (fwd_streams[sm_id] == NULL)
546 rte_free(fwd_streams[sm_id]);
547 fwd_streams[sm_id] = NULL;
549 rte_free(fwd_streams);
554 nb_fwd_streams = nb_fwd_streams_new;
555 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
556 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
557 if (fwd_streams == NULL)
558 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
559 "failed\n", nb_fwd_streams);
561 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
562 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
563 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
564 if (fwd_streams[sm_id] == NULL)
565 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a histogram summary of burst sizes for one direction ("RX"/"TX"):
 * total bursts, plus the one/two dominant burst sizes as percentages.
 * Fragment: the "total_burst = 0;" initializer, the second-highest-bucket
 * branch, and several early returns/braces are missing from this listing —
 * reading total_burst without its init would be UB; confirm upstream.
 */
574 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
576 unsigned int total_burst;
577 unsigned int nb_burst;
578 unsigned int burst_stats[3];
579 uint16_t pktnb_stats[3];
581 int burst_percent[3];
584 * First compute the total number of packet bursts and the
585 * two highest numbers of bursts of the same number of packets.
588 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
589 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
590 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
591 nb_burst = pbs->pkt_burst_spread[nb_pkt];
594 total_burst += nb_burst;
/* Track the single most frequent burst size (demote previous best). */
595 if (nb_burst > burst_stats[0]) {
596 burst_stats[1] = burst_stats[0];
597 pktnb_stats[1] = pktnb_stats[0];
598 burst_stats[0] = nb_burst;
599 pktnb_stats[0] = nb_pkt;
602 if (total_burst == 0)
604 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
605 printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
606 burst_percent[0], (int) pktnb_stats[0]);
607 if (burst_stats[0] == total_burst) {
611 if (burst_stats[0] + burst_stats[1] == total_burst) {
612 printf(" + %d%% of %d pkts]\n",
613 100 - burst_percent[0], pktnb_stats[1]);
616 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
617 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
618 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
619 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
622 printf(" + %d%% of %d pkts + %d%% of others]\n",
623 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
625 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/*
 * Print accumulated forwarding statistics for one port. Two layouts:
 * left-aligned totals when no per-queue stats-register mapping is enabled,
 * right-aligned otherwise; optionally followed by per-queue stats, burst
 * histograms (when compiled in), and flow-director hit/miss counters.
 */
628 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
630 struct rte_port *port;
633 static const char *fwd_stats_border = "----------------------";
635 port = &ports[port_id];
636 printf("\n  %s Forward statistics for port %-2d %s\n",
637 fwd_stats_border, port_id, fwd_stats_border);
639 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
640 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
642 stats->ipackets, stats->ierrors,
643 (uint64_t) (stats->ipackets + stats->ierrors),
645 if (cur_fwd_eng == &csum_fwd_engine)
646 printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
647 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
649 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
651 stats->opackets, port->tx_dropped,
652 (uint64_t) (stats->opackets + port->tx_dropped));
654 if (stats->rx_nombuf > 0)
655 printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
/* else branch: queue-stats mapping enabled — narrower column layout. */
659 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
661 stats->ipackets, stats->ierrors,
662 (uint64_t) (stats->ipackets + stats->ierrors));
664 if (cur_fwd_eng == &csum_fwd_engine)
665 printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
666 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
668 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
670 stats->opackets, port->tx_dropped,
671 (uint64_t) (stats->opackets + port->tx_dropped));
673 if (stats->rx_nombuf > 0)
674 printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
676 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
678 pkt_burst_stats_display("RX",
679 &port->rx_stream->rx_burst_stats);
681 pkt_burst_stats_display("TX",
682 &port->tx_stream->tx_burst_stats);
685 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
686 printf("  Fdirmiss:%14"PRIu64"    Fdirmatch:%14"PRIu64"\n",
690 if (port->rx_queue_stats_mapping_enabled) {
692 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
693 printf("  Stats reg %2d RX-packets:%14"PRIu64
694 "     RX-errors:%14"PRIu64
695 "    RX-bytes:%14"PRIu64"\n",
696 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
700 if (port->tx_queue_stats_mapping_enabled) {
701 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
702 printf("  Stats reg %2d TX-packets:%14"PRIu64
703 "                                 TX-bytes:%14"PRIu64"\n",
704 i, stats->q_opackets[i], stats->q_obytes[i]);
708 printf("  %s--------------------------------%s\n",
709 fwd_stats_border, fwd_stats_border);
/*
 * Print per-stream statistics (RX port/queue -> TX port/queue mapping,
 * packet/drop counters, checksum errors for the csum engine, and burst
 * histograms when compiled in). Streams with no activity are skipped
 * (early-return line missing from this listing).
 */
713 fwd_stream_stats_display(streamid_t stream_id)
715 struct fwd_stream *fs;
716 static const char *fwd_top_stats_border = "-------";
718 fs = fwd_streams[stream_id];
719 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
720 (fs->fwd_dropped == 0))
722 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
723 "TX Port=%2d/Queue=%2d %s\n",
724 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
725 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
726 printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
727 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
729 /* if checksum mode */
730 if (cur_fwd_eng == &csum_fwd_engine) {
731 printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
732 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
735 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
736 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
737 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/*
 * Drain stale packets from every RX queue of every port before starting a
 * forwarding run: two passes over all port/queue pairs, freeing whatever
 * rte_eth_rx_burst() returns, with a 10 ms pause between passes. The inner
 * "repeat until nb_rx == 0" loop construct is missing from this listing.
 */
742 flush_all_rx_queues(void)
744 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
751 for (j = 0; j < 2; j++) {
752 for (rxp = 0; rxp < nb_ports; rxp++) {
753 for (rxq = 0; rxq < nb_rxq; rxq++) {
755 nb_rx = rte_eth_rx_burst(rxp, rxq,
756 pkts_burst, MAX_PKT_BURST);
757 for (i = 0; i < nb_rx; i++)
758 rte_pktmbuf_free(pkts_burst[i]);
762 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/*
 * Forwarding main loop for one lcore: repeatedly invoke the engine's
 * packet_fwd callback on every stream assigned to this lcore until
 * fc->stopped is set (by stop_packet_forwarding() on the master core).
 */
767 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
769 struct fwd_stream **fsm;
773 fsm = &fwd_streams[fc->stream_idx];
774 nb_fs = fc->stream_nb;
776 for (sm_id = 0; sm_id < nb_fs; sm_id++)
777 (*pkt_fwd)(fsm[sm_id]);
778 } while (! fc->stopped);
/*
 * lcore_function_t entry point launched by rte_eal_remote_launch(): runs
 * the currently configured engine's forwarding loop on this lcore.
 */
782 start_pkt_forward_on_core(void *fwd_arg)
784 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
785 cur_fwd_config.fwd_eng->packet_fwd);
/*
 * Send a single TXONLY burst on this lcore by running the tx_only engine
 * with stopped pre-set to 1, so the do/while loop in run_pkt_fwd_on_lcore
 * executes exactly once. NOTE(review): the line copying *fwd_lc into
 * tmp_lcore appears to be missing from this listing — without it tmp_lcore's
 * stream_idx/stream_nb are uninitialized; confirm against upstream.
 */
790 * Run the TXONLY packet forwarding engine to send a single burst of packets.
791 * Used to start communication flows in network loopback test configurations.
794 run_one_txonly_burst_on_core(void *fwd_arg)
796 struct fwd_lcore *fwd_lc;
797 struct fwd_lcore tmp_lcore;
799 fwd_lc = (struct fwd_lcore *) fwd_arg;
801 tmp_lcore.stopped = 1;
802 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
/*
 * Launch the given forwarding loop on every configured forwarding lcore:
 * first run the engine's optional per-port begin hook, then remote-launch
 * pkt_fwd_on_lcore on each lcore (the master core is skipped in
 * interactive mode so it can keep serving the command line).
 */
807 * Launch packet forwarding:
808 * - Setup per-port forwarding context.
809 * - launch logical cores with their forwarding configuration.
812 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
814 port_fwd_begin_t port_fwd_begin;
819 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
820 if (port_fwd_begin != NULL) {
821 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
822 (*port_fwd_begin)(fwd_ports_ids[i]);
824 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
825 lc_id = fwd_lcores_cpuids[i];
826 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
827 fwd_lcores[i]->stopped = 0;
828 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
829 fwd_lcores[i], lc_id);
/* diag != 0 check line missing from this listing. */
831 printf("launch lcore %u failed - diag=%d\n",
/*
 * Start a forwarding run: validate preconditions (all ports started, not
 * already forwarding, DCB needs >1 forwarding lcore), flush RX queues,
 * snapshot per-port stats and zero per-stream counters, optionally send a
 * priming TXONLY burst (with_tx_first), then launch the configured engine.
 * Fragment: early "return" statements after the printfs and the
 * "test_done = 0;" / with_tx_first guard lines are missing from this
 * listing.
 */
838 * Launch packet forwarding configuration.
841 start_packet_forwarding(int with_tx_first)
843 port_fwd_begin_t port_fwd_begin;
844 port_fwd_end_t port_fwd_end;
845 struct rte_port *port;
850 if (all_ports_started() == 0) {
851 printf("Not all ports were started\n");
854 if (test_done == 0) {
855 printf("Packet forwarding already started\n");
858 if((dcb_test) && (nb_fwd_lcores == 1)) {
859 printf("In DCB mode,the nb forwarding cores should be larger than 1.\n");
863 flush_all_rx_queues();
865 rxtx_config_display();
/* Snapshot each port's stats so deltas can be computed at stop time. */
867 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
868 pt_id = fwd_ports_ids[i];
869 port = &ports[pt_id];
870 rte_eth_stats_get(pt_id, &port->stats);
871 port->tx_dropped = 0;
873 map_port_queue_stats_mapping_registers(pt_id, port);
875 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
876 fwd_streams[sm_id]->rx_packets = 0;
877 fwd_streams[sm_id]->tx_packets = 0;
878 fwd_streams[sm_id]->fwd_dropped = 0;
879 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
880 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
882 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
883 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
884 sizeof(fwd_streams[sm_id]->rx_burst_stats));
885 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
886 sizeof(fwd_streams[sm_id]->tx_burst_stats));
888 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
889 fwd_streams[sm_id]->core_cycles = 0;
/* with_tx_first path: one synchronous TXONLY burst before the real run. */
893 port_fwd_begin = tx_only_engine.port_fwd_begin;
894 if (port_fwd_begin != NULL) {
895 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
896 (*port_fwd_begin)(fwd_ports_ids[i]);
898 launch_packet_forwarding(run_one_txonly_burst_on_core);
899 rte_eal_mp_wait_lcore();
900 port_fwd_end = tx_only_engine.port_fwd_end;
901 if (port_fwd_end != NULL) {
902 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
903 (*port_fwd_end)(fwd_ports_ids[i]);
906 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop a forwarding run and print statistics: signal every forwarding
 * lcore to stop, wait for them, run the engine's per-port end hook, then
 * aggregate per-stream counters into their ports, compute per-port stat
 * deltas against the snapshot taken at start time, and print per-port plus
 * accumulated totals. Fragment: early returns, "test_done = 1;", and the
 * total_recv/total_xmit/total_rx_nombuf zero-initializers are missing from
 * this listing.
 */
910 stop_packet_forwarding(void)
912 struct rte_eth_stats stats;
913 struct rte_port *port;
914 port_fwd_end_t port_fwd_end;
921 uint64_t total_rx_dropped;
922 uint64_t total_tx_dropped;
923 uint64_t total_rx_nombuf;
925 uint64_t rx_bad_ip_csum;
926 uint64_t rx_bad_l4_csum;
927 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
930 static const char *acc_stats_border = "+++++++++++++++";
932 if (all_ports_started() == 0) {
933 printf("Not all ports were started\n");
937 printf("Packet forwarding not started\n");
940 printf("Telling cores to stop...");
941 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
942 fwd_lcores[lc_id]->stopped = 1;
943 printf("\nWaiting for lcores to finish...\n");
944 rte_eal_mp_wait_lcore();
945 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
946 if (port_fwd_end != NULL) {
947 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
948 pt_id = fwd_ports_ids[i];
949 (*port_fwd_end)(pt_id);
952 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* Fold every stream's counters into its RX/TX ports. */
955 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
956 if (cur_fwd_config.nb_fwd_streams >
957 cur_fwd_config.nb_fwd_ports) {
958 fwd_stream_stats_display(sm_id);
959 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
960 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
962 ports[fwd_streams[sm_id]->tx_port].tx_stream =
964 ports[fwd_streams[sm_id]->rx_port].rx_stream =
967 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
968 tx_dropped = (uint64_t) (tx_dropped +
969 fwd_streams[sm_id]->fwd_dropped);
970 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
973 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
974 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
975 fwd_streams[sm_id]->rx_bad_ip_csum);
976 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
980 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
981 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
982 fwd_streams[sm_id]->rx_bad_l4_csum);
983 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
986 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
987 fwd_cycles = (uint64_t) (fwd_cycles +
988 fwd_streams[sm_id]->core_cycles);
993 total_rx_dropped = 0;
994 total_tx_dropped = 0;
996 for (i = 0; i < ((cur_fwd_config.nb_fwd_ports + 1) & ~0x1); i++) {
997 pt_id = fwd_ports_ids[i];
999 port = &ports[pt_id];
1000 rte_eth_stats_get(pt_id, &stats);
/* Convert cumulative device counters into deltas for this run. */
1001 stats.ipackets -= port->stats.ipackets;
1002 port->stats.ipackets = 0;
1003 stats.opackets -= port->stats.opackets;
1004 port->stats.opackets = 0;
1005 stats.ibytes -= port->stats.ibytes;
1006 port->stats.ibytes = 0;
1007 stats.obytes -= port->stats.obytes;
1008 port->stats.obytes = 0;
1009 stats.ierrors -= port->stats.ierrors;
1010 port->stats.ierrors = 0;
1011 stats.oerrors -= port->stats.oerrors;
1012 port->stats.oerrors = 0;
1013 stats.rx_nombuf -= port->stats.rx_nombuf;
1014 port->stats.rx_nombuf = 0;
1015 stats.fdirmatch -= port->stats.fdirmatch;
/* BUG(review): should reset port->stats.fdirmatch — this re-zeroes
 * rx_nombuf instead (copy/paste); fixed in later DPDK releases. */
1016 port->stats.rx_nombuf = 0;
1017 stats.fdirmiss -= port->stats.fdirmiss;
/* BUG(review): same copy/paste — should reset port->stats.fdirmiss. */
1018 port->stats.rx_nombuf = 0;
1020 total_recv += stats.ipackets;
1021 total_xmit += stats.opackets;
1022 total_rx_dropped += stats.ierrors;
1023 total_tx_dropped += port->tx_dropped;
1024 total_rx_nombuf += stats.rx_nombuf;
1026 fwd_port_stats_display(pt_id, &stats);
1028 printf("\n  %s Accumulated forward statistics for all ports"
1030 acc_stats_border, acc_stats_border);
1031 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1033 "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1035 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1036 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1037 if (total_rx_nombuf > 0)
1038 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1039 printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1041 acc_stats_border, acc_stats_border);
1042 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* NOTE(review): upstream guards this with "if (total_recv > 0)" — the
 * guard line is missing here; division by zero otherwise. Confirm. */
1044 printf("\n  CPU cycles/packet=%u (total cycles="
1045 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1046 (unsigned int)(fwd_cycles / total_recv),
1047 fwd_cycles, total_recv);
1049 printf("\nDone.\n");
/*
 * Return non-zero iff every probed port is in RTE_PORT_STARTED state.
 * Fragment: "port = &ports[pi];", "return 0;" and "return 1;" lines are
 * missing from this listing.
 */
1054 all_ports_started(void)
1057 struct rte_port *port;
1059 for (pi = 0; pi < nb_ports; pi++) {
1061 /* Check if there is a port which is not started */
1062 if (port->port_status != RTE_PORT_STARTED)
1066 /* No port is not started */
/*
 * Start the given port (or all ports when pid >= nb_ports / RTE_PORT_ALL).
 * For each selected port: atomically claim it (STOPPED -> HANDLING),
 * (re)configure the device and its RX/TX queues if flagged, call
 * rte_eth_dev_start(), and publish the STARTED state; on any failure the
 * port is put back to STOPPED and the reconfig flag re-armed so the next
 * attempt retries. Finishes by polling link status of all started ports.
 * Fragment: several "continue"/"return" lines, diag checks and closing
 * braces are missing from this listing.
 */
1071 start_port(portid_t pid)
1073 int diag, need_check_link_status = 0;
1076 struct rte_port *port;
1078 if (test_done == 0) {
1079 printf("Please stop forwarding first\n");
1083 if (init_fwd_streams() < 0) {
1084 printf("Fail from init_fwd_streams()\n");
1090 for (pi = 0; pi < nb_ports; pi++) {
1091 if (pid < nb_ports && pid != pi)
/* Claim the port via CAS so concurrent commands cannot race. */
1095 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1096 RTE_PORT_HANDLING) == 0) {
1097 printf("Port %d is now not stopped\n", pi);
1101 if (port->need_reconfig > 0) {
1102 port->need_reconfig = 0;
1104 printf("Configuring Port %d\n", pi);
1105 /* configure port */
1106 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1109 if (rte_atomic16_cmpset(&(port->port_status),
1110 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1111 printf("Port %d can not be set back "
1112 "to stopped\n", pi);
1113 printf("Fail to configure port %d\n", pi);
1114 /* try to reconfigure port next time */
1115 port->need_reconfig = 1;
1120 if (port->need_reconfig_queues > 0) {
1121 port->need_reconfig_queues = 0;
1123 /* setup tx queues */
1124 for (qi = 0; qi < nb_txq; qi++) {
1125 diag = rte_eth_tx_queue_setup(pi, qi, nb_txd,
1126 port->socket_id, &(port->tx_conf));
1130 /* Fail to setup tx queue, return */
1131 if (rte_atomic16_cmpset(&(port->port_status),
1133 RTE_PORT_STOPPED) == 0)
1134 printf("Port %d can not be set back "
1135 "to stopped\n", pi);
1136 printf("Fail to configure port %d tx queues\n", pi);
1137 /* try to reconfigure queues next time */
1138 port->need_reconfig_queues = 1;
1141 /* setup rx queues */
1142 for (qi = 0; qi < nb_rxq; qi++) {
1143 diag = rte_eth_rx_queue_setup(pi, qi, nb_rxd,
1144 port->socket_id, &(port->rx_conf),
1145 mbuf_pool_find(port->socket_id));
1149 /* Fail to setup rx queue, return */
1150 if (rte_atomic16_cmpset(&(port->port_status),
1152 RTE_PORT_STOPPED) == 0)
1153 printf("Port %d can not be set back "
1154 "to stopped\n", pi);
1155 printf("Fail to configure port %d rx queues\n", pi);
1156 /* try to reconfigure queues next time */
1157 port->need_reconfig_queues = 1;
/* start port */
1163 if (rte_eth_dev_start(pi) < 0) {
1164 printf("Fail to start port %d\n", pi);
1166 /* Fail to setup rx queue, return */
1167 if (rte_atomic16_cmpset(&(port->port_status),
1168 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1169 printf("Port %d can not be set back to "
1174 if (rte_atomic16_cmpset(&(port->port_status),
1175 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1176 printf("Port %d can not be set into started\n", pi);
1178 /* at least one port started, need checking link status */
1179 need_check_link_status = 1;
1182 if (need_check_link_status)
1183 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1185 printf("Please stop the ports first\n");
/*
 * Stop the given port (or all ports when pid is RTE_PORT_ALL): atomically
 * transition STARTED -> HANDLING, call rte_eth_dev_stop(), then publish
 * STOPPED; re-check link status afterwards. Fragment: early returns,
 * "continue" lines and "port = &ports[pi];" are missing from this listing.
 */
1191 stop_port(portid_t pid)
1194 struct rte_port *port;
1195 int need_check_link_status = 0;
1197 if (test_done == 0) {
1198 printf("Please stop forwarding first\n");
1205 printf("Stopping ports...\n");
1207 for (pi = 0; pi < nb_ports; pi++) {
1208 if (pid < nb_ports && pid != pi)
1212 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1213 RTE_PORT_HANDLING) == 0)
1216 rte_eth_dev_stop(pi);
1218 if (rte_atomic16_cmpset(&(port->port_status),
1219 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1220 printf("Port %d can not be set into stopped\n", pi);
1221 need_check_link_status = 1;
1223 if (need_check_link_status)
1224 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1230 close_port(portid_t pid)
1233 struct rte_port *port;
1235 if (test_done == 0) {
1236 printf("Please stop forwarding first\n");
1240 printf("Closing ports...\n");
1242 for (pi = 0; pi < nb_ports; pi++) {
1243 if (pid < nb_ports && pid != pi)
1247 if (rte_atomic16_cmpset(&(port->port_status),
1248 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1249 printf("Port %d is now not stopped\n", pi);
1253 rte_eth_dev_close(pi);
1255 if (rte_atomic16_cmpset(&(port->port_status),
1256 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1257 printf("Port %d can not be set into stopped\n", pi);
/*
 * all_ports_stopped(): scan every probed port; the visible check fails
 * the scan when any port is not in RTE_PORT_STOPPED state.
 * NOTE(review): the return statements fall in lines missing from this
 * sampled capture — presumably returns 0 on the first non-stopped
 * port and 1 otherwise; verify against the full source.
 */
1264 all_ports_stopped(void)
1267 struct rte_port *port;
1269 for (pi = 0; pi < nb_ports; pi++) {
1271 if (port->port_status != RTE_PORT_STOPPED)
/*
 * NOTE(review): fragment of a shutdown routine whose header was not
 * captured — iterates all probed ports and closes each device.
 * The stop call between the printf and the close falls in lines
 * missing from this sampled capture.
 */
1283 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1284 printf("Stopping port %d...", pt_id);
1286 rte_eth_dev_close(pt_id);
/* Signature of an interactive test-menu command handler: no arguments,
 * no return value. */
1292 typedef void (*cmd_func_t)(void);
/* One entry of the test command menu: command name and its handler. */
1293 struct pmd_test_command {
1294 const char *cmd_name;
1295 cmd_func_t cmd_func;
/* Number of entries in pmd_test_menu[] (the array itself is defined in
 * a part of the file not visible here). */
1298 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1300 /* Check the link status of all ports in up to 9s, and print them finally.
 * Polls each port in `port_mask` every CHECK_INTERVAL ms; once every
 * link is up (or the timeout expires) the final status of each port is
 * printed.
 * NOTE(review): sampled listing — the embedded source line numbers
 * skip, so some statements (continue, all_ports_up init, break) are
 * missing from this capture. */
1302 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1304 #define CHECK_INTERVAL 100 /* 100ms */
1305 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1306 uint8_t portid, count, all_ports_up, print_flag = 0;
1307 struct rte_eth_link link;
1309 printf("Checking link statuses...\n");
1311 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1313 for (portid = 0; portid < port_num; portid++) {
1314 if ((port_mask & (1 << portid)) == 0)
1316 memset(&link, 0, sizeof(link));
1317 rte_eth_link_get_nowait(portid, &link);
1318 /* print link status if flag set */
1319 if (print_flag == 1) {
1320 if (link.link_status)
1321 printf("Port %d Link Up - speed %u "
1322 "Mbps - %s\n", (uint8_t)portid,
1323 (unsigned)link.link_speed,
1324 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* Fix: dropped the stray "\n" from "half-duplex" — the format string
 * above already ends with a newline, so half-duplex ports printed an
 * extra blank line. */
1325 ("full-duplex") : ("half-duplex"));
1327 printf("Port %d Link Down\n",
1331 /* clear all_ports_up flag if any link down */
1332 if (link.link_status == 0) {
1337 /* after finally printing all link status, get out */
1338 if (print_flag == 1)
1341 if (all_ports_up == 0) {
1343 rte_delay_ms(CHECK_INTERVAL);
1346 /* set the print_flag if all ports up or timeout */
1347 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Program per-queue TX stats-counter mappings for `port_id` from the
 * global tx_queue_stats_mappings[] table (entries whose queue id is
 * within nb_txq), then mark the port's mapping-enabled flag.
 * NOTE(review): the `diag` error-return paths and the mapping_found
 * check fall in lines missing from this sampled capture.
 */
1354 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1358 uint8_t mapping_found = 0;
1360 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1361 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1362 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1363 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1364 tx_queue_stats_mappings[i].queue_id,
1365 tx_queue_stats_mappings[i].stats_counter_id);
1372 port->tx_queue_stats_mapping_enabled = 1;
/*
 * Program per-queue RX stats-counter mappings for `port_id` from the
 * global rx_queue_stats_mappings[] table (entries whose queue id is
 * within nb_rxq), then mark the port's mapping-enabled flag.
 * Mirrors set_tx_queue_stats_mapping_registers() above.
 * NOTE(review): the `diag` error-return paths and the mapping_found
 * check fall in lines missing from this sampled capture.
 */
1377 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1381 uint8_t mapping_found = 0;
1383 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1384 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1385 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1386 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1387 rx_queue_stats_mappings[i].queue_id,
1388 rx_queue_stats_mappings[i].stats_counter_id);
1395 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue-stats mappings to port `pi`.
 * -ENOTSUP from either helper is tolerated (the PMD simply lacks the
 * feature): the corresponding enabled flag is cleared and a notice is
 * printed. Any other error aborts the application via rte_exit().
 * NOTE(review): sampled listing — the branches distinguishing
 * -ENOTSUP from other negative `diag` values are partly missing here.
 */
1400 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1404 diag = set_tx_queue_stats_mapping_registers(pi, port);
1406 if (diag == -ENOTSUP) {
1407 port->tx_queue_stats_mapping_enabled = 0;
1408 printf("TX queue stats mapping not supported port id=%d\n", pi);
1411 rte_exit(EXIT_FAILURE,
1412 "set_tx_queue_stats_mapping_registers "
1413 "failed for port id=%d diag=%d\n",
1417 diag = set_rx_queue_stats_mapping_registers(pi, port);
1419 if (diag == -ENOTSUP) {
1420 port->rx_queue_stats_mapping_enabled = 0;
1421 printf("RX queue stats mapping not supported port id=%d\n", pi);
1424 rte_exit(EXIT_FAILURE,
1425 "set_rx_queue_stats_mapping_registers "
1426 "failed for port id=%d diag=%d\n",
/*
 * Build the default configuration for every probed port: device conf
 * (rx mode, flow-director, RSS), RX/TX queue thresholds, the port MAC
 * address, and the queue-stats mappings.
 * NOTE(review): sampled listing — lines 1442-1443 vs 1445-1446 below
 * are the two arms of a conditional whose condition line is missing;
 * presumably RSS (rss_hf) is enabled only when multiple RX queues are
 * configured — verify against the full source.
 */
1432 init_port_config(void)
1435 struct rte_port *port;
1437 for (pid = 0; pid < nb_ports; pid++) {
1439 port->dev_conf.rxmode = rx_mode;
1440 port->dev_conf.fdir_conf = fdir_conf;
1442 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1443 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1445 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1446 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1448 port->rx_conf.rx_thresh = rx_thresh;
1449 port->rx_conf.rx_free_thresh = rx_free_thresh;
1450 port->rx_conf.rx_drop_en = rx_drop_en;
1451 port->tx_conf.tx_thresh = tx_thresh;
1452 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1453 port->tx_conf.tx_free_thresh = tx_free_thresh;
1454 port->tx_conf.txq_flags = txq_flags;
1456 rte_eth_macaddr_get(pid, &port->eth_addr);
1458 map_port_queue_stats_mapping_registers(pid, port);
/* VLAN tags (0-31) mapped onto VMDQ pools by get_eth_dcb_conf() below;
 * its size also determines nb_pool_maps and the number of VLAN filters
 * installed in init_port_dcb_config(). */
1462 const uint16_t vlan_tags[] = {
1463 0, 1, 2, 3, 4, 5, 6, 7,
1464 8, 9, 10, 11, 12, 13, 14, 15,
1465 16, 17, 18, 19, 20, 21, 22, 23,
1466 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Translate the testpmd-level DCB settings in `dcb_conf` into a device
 * configuration in `eth_conf`: either VMDQ+DCB (vt mode, VLAN tags
 * mapped to pools) or plain DCB RX/TX, with PFC capability set when
 * requested.
 * NOTE(review): sampled listing — the embedded source line numbers
 * skip, so some statements (closing braces, the non-vt else header)
 * are missing from this capture.
 * Fix applied: the identifier `&eth_conf` had been corrupted to
 * "ð_conf" by HTML-entity mis-decoding on the rte_memcpy lines;
 * restored.
 */
1470 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1475 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1476 * given above, and the number of traffic classes available for use.
1478 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1479 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1480 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1482 /* VMDQ+DCB RX and TX configurations */
1483 vmdq_rx_conf.enable_default_pool = 0;
1484 vmdq_rx_conf.default_pool = 0;
1485 vmdq_rx_conf.nb_queue_pools =
1486 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1487 vmdq_tx_conf.nb_queue_pools =
1488 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1490 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1491 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1492 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1493 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
1495 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1496 vmdq_rx_conf.dcb_queue[i] = i;
1497 vmdq_tx_conf.dcb_queue[i] = i;
1500 /*set DCB mode of RX and TX of multiple queues*/
1501 eth_conf->rxmode.mq_mode = ETH_VMDQ_DCB;
1502 eth_conf->txmode.mq_mode = ETH_VMDQ_DCB_TX;
1503 if (dcb_conf->pfc_en)
1504 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1506 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1508 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1509 sizeof(struct rte_eth_vmdq_dcb_conf)));
1510 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1511 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
1514 struct rte_eth_dcb_rx_conf rx_conf;
1515 struct rte_eth_dcb_tx_conf tx_conf;
1517 /* queue mapping configuration of DCB RX and TX */
1518 if (dcb_conf->num_tcs == ETH_4_TCS)
1519 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1521 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1523 rx_conf.nb_tcs = dcb_conf->num_tcs;
1524 tx_conf.nb_tcs = dcb_conf->num_tcs;
1526 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1527 rx_conf.dcb_queue[i] = i;
1528 tx_conf.dcb_queue[i] = i;
1530 eth_conf->rxmode.mq_mode = ETH_DCB_RX;
1531 eth_conf->txmode.mq_mode = ETH_DCB_TX;
1532 if (dcb_conf->pfc_en)
1533 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1535 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1537 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1538 sizeof(struct rte_eth_dcb_rx_conf)));
1539 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1540 sizeof(struct rte_eth_dcb_tx_conf)));
/*
 * Configure port `pid` for DCB operation: build the device conf via
 * get_eth_dcb_conf(), copy it into the port, set queue thresholds,
 * enable HW VLAN filtering, install a VLAN filter for every tag in
 * vlan_tags[], and re-apply the MAC address and stats mappings.
 * NOTE(review): sampled listing — the embedded source line numbers
 * skip, so some statements (nb_rxq/nb_txq setup, the retval error
 * check, return) are missing from this capture.
 */
1547 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1549 struct rte_eth_conf port_conf;
1550 struct rte_port *rte_port;
1555 /* rxq and txq configuration in dcb mode */
1558 rx_free_thresh = 64;
1560 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1561 /* Enter DCB configuration status */
1564 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1565 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1566 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1570 rte_port = &ports[pid];
1571 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
1573 rte_port->rx_conf.rx_thresh = rx_thresh;
1574 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1575 rte_port->tx_conf.tx_thresh = tx_thresh;
1576 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1577 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
1579 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1580 for (i = 0; i < nb_vlan; i++){
1581 rx_vft_set(pid, vlan_tags[i], 1);
1584 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1585 map_port_queue_stats_mapping_registers(pid, rte_port);
1590 #ifdef RTE_EXEC_ENV_BAREMETAL
1595 main(int argc, char** argv)
1600 diag = rte_eal_init(argc, argv);
1602 rte_panic("Cannot init EAL\n");
1604 if (rte_pmd_init_all())
1605 rte_panic("Cannot init PMD\n");
1607 if (rte_eal_pci_probe())
1608 rte_panic("Cannot probe PCI\n");
1610 nb_ports = (portid_t) rte_eth_dev_count();
1612 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1614 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1615 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1616 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1617 "configuration file\n");
1619 set_def_fwd_config();
1621 rte_panic("Empty set of forwarding logical cores - check the "
1622 "core mask supplied in the command parameters\n");
1627 launch_args_parse(argc, argv);
1629 if (nb_rxq > nb_txq)
1630 printf("Warning: nb_rxq=%d enables RSS configuration, "
1631 "but nb_txq=%d will prevent to fully test it.\n",
1635 start_port(RTE_PORT_ALL);
1637 /* set all ports to promiscuous mode by default */
1638 for (port_id = 0; port_id < nb_ports; port_id++)
1639 rte_eth_promiscuous_enable(port_id);
1641 if (interactive == 1)
1647 printf("No commandline core given, start packet forwarding\n");
1648 start_packet_forwarding(0);
1649 printf("Press enter to exit\n");
1650 rc = read(0, &c, 1);