/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No NUMA support by default. */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not set.
 */
uint8_t socket_num = UMA_NO_CONFIG;
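
/*
 * Example invocations (a sketch; --numa and --socket-num are assumed to
 * be the options handled by testpmd's command-line parser):
 *
 *   testpmd -c 0xf -n 4 -- --numa           # split mbuf pools across sockets
 *   testpmd -c 0xf -n 4 -- --socket-num=1   # UMA, all memory from socket 1
 */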
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed Ethernet ports. */
portid_t nb_ports;             /**< Number of probed Ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
133 * Forwarding engines.
135 struct fwd_engine * fwd_engines[] = {
141 #ifdef RTE_LIBRTE_IEEE1588
142 &ieee1588_fwd_engine,
147 struct fwd_config cur_fwd_config;
148 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
				     * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN, /* first segment carries the whole packet */
};
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
/* Is the current configuration in DCB mode? 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/* DCB on and VT on is the default queue mapping. */
enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

/*
 * Configurable values of RX and TX ring threshold registers.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */

#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */

struct rte_eth_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct rte_eth_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};
/*
 * Configurable value of RX free threshold.
 */
uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */

/*
 * Configurable value of RX drop enable.
 */
uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */

/*
 * Configurable value of TX free threshold.
 */
uint16_t tx_free_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX RS bit threshold.
 */
uint16_t tx_rs_thresh = 0; /* Use default values. */

/*
 * Configurable value of TX queue flags.
 */
uint32_t txq_flags = 0; /* No flags set. */

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default. */
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.flexbytes_offset = 0x6,
};
static volatile int test_done = 1; /* packet forwarding is stopped when set to 1 */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};

struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};
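
/*
 * Per-mbuf constructor, invoked once for every mbuf when the pool is
 * populated: it places the data buffer right after the rte_mbuf header
 * and resets the packet metadata.
 */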
static void
testpmd_mbuf_ctor(struct rte_mempool *mp,
		  void *opaque_arg,
		  void *raw_mbuf,
		  __attribute__((unused)) unsigned i)
{
	struct mbuf_ctor_arg *mb_ctor_arg;
	struct rte_mbuf *mb;

	mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
	mb = (struct rte_mbuf *) raw_mbuf;

	mb->type         = RTE_MBUF_PKT;
	mb->pool         = mp;
	mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
	mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
			mb_ctor_arg->seg_buf_offset);
	mb->buf_len      = mb_ctor_arg->seg_buf_size;
	mb->ol_flags     = 0;
	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
	mb->pkt.nb_segs  = 1;
	mb->pkt.vlan_macip.data = 0;
	mb->pkt.hash.rss = 0;
}
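
/*
 * Pool constructor: checks that the pool private area is large enough to
 * hold a rte_pktmbuf_pool_private structure, then records the data room
 * size of the mbufs in it.
 */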
static void
testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
		       void *opaque_arg)
{
	struct mbuf_pool_ctor_arg *mbp_ctor_arg;
	struct rte_pktmbuf_pool_private *mbp_priv;

	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		printf("%s(%s) private_data_size %d < %d\n",
		       __func__, mp->name, (int) mp->private_data_size,
		       (int) sizeof(struct rte_pktmbuf_pool_private));
		return;
	}
	mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
	mbp_priv = (struct rte_pktmbuf_pool_private *)
		((char *)mp + sizeof(struct rte_mempool));
	mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}
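
/*
 * Create a mempool of nb_mbuf mbufs with a data segment of mbuf_seg_size
 * bytes (plus headroom) in the memory of the given socket; exits on
 * allocation failure.
 */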
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp;
	struct mbuf_pool_ctor_arg mbp_ctor_arg;
	struct mbuf_ctor_arg mb_ctor_arg;
	uint32_t mb_size;

	mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
						mbuf_seg_size);
	mb_ctor_arg.seg_buf_offset =
		(uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
	mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
	mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
	rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
				    (unsigned) mb_mempool_cache,
				    sizeof(struct rte_pktmbuf_pool_private),
				    testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
				    testpmd_mbuf_ctor, &mb_ctor_arg,
				    socket_id, 0);
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
			 "failed\n", socket_id);
	}
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[MAX_SOCKET];

	memset(port_per_socket, 0, MAX_SOCKET);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
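	/*
	 * Worked example of the default sizing (constant values assumed
	 * from testpmd.h, not authoritative): with RTE_TEST_RX_DESC_MAX =
	 * 2048, RTE_TEST_TX_DESC_MAX = 2048, 4 lcores, mb_mempool_cache =
	 * 16 and MAX_PKT_BURST = 32, each pool holds
	 * 2048 + 4*16 + 2048 + 32 = 4192 mbufs; without NUMA support this
	 * is further multiplied by nb_ports.
	 */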
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				   + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	/*
	 * Records which mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * nb_ports,
			    CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
			 "failed\n", nb_ports);
	}

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < MAX_SOCKET; i++) {
			nb_mbuf = (nb_mbuf_per_pool * port_per_socket[i]);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size, nb_mbuf, i);
		}
	}
	init_port_config();
	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
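
/*
 * (Re)allocate one forwarding stream per RX queue of every port, after
 * checking nb_rxq/nb_txq against the limits reported by each device.
 * Returns 0 on success, -1 on an invalid queue count.
 */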
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;

	/* set socket id according to numa or not */
	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support)
			port->socket_id = rte_eth_dev_socket_id(pid);
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	nb_fwd_streams_new = (streamid_t) (nb_ports * nb_rxq);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old forwarding streams */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new forwarding streams */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
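
/*
 * Display the forward statistics of a port, either as port totals or,
 * when queue stats mapping is enabled, per stats register.
 */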
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       " RX-total: %-"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       " TX-total: %-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
	}
	else {
		printf("  RX-packets:%14"PRIu64" RX-dropped:%14"PRIu64
		       " RX-total:%14"PRIu64"\n",
		       stats->ipackets, stats->ierrors,
		       (uint64_t) (stats->ipackets + stats->ierrors));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);

		printf("  TX-packets:%14"PRIu64" TX-dropped:%14"PRIu64
		       " TX-total:%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));

		if (stats->rx_nombuf > 0)
			printf("  RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
	}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
		       stats->fdirmiss, stats->fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i],
			       stats->q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
static void
flush_all_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < nb_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				do {
					nb_rx = rte_eth_rx_burst(rxp, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);
				} while (nb_rx > 0);
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}
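
/*
 * Forwarding loop of one logical core: repeatedly invoke the engine's
 * packet_fwd callback on every stream assigned to this core until the
 * core is told to stop.
 */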
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if ((dcb_test) && (nb_fwd_lcores == 1)) {
		printf("In DCB mode, the number of forwarding cores "
		       "should be larger than 1.\n");
		return;
	}
	test_done = 0;
	flush_all_rx_queues();
	fwd_config_setup();
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
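
/*
 * Stop packet forwarding: tell all forwarding lcores to stop, wait for
 * them to finish, fold the per-stream statistics back into the ports and
 * display the accumulated statistics of all forwarding ports.
 */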
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t pt_id;
	streamid_t sm_id;
	lcoreid_t lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf = 0;
	for (i = 0; i < ((cur_fwd_config.nb_fwd_ports + 1) & ~0x1); i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.ierrors -= port->stats.ierrors;
		port->stats.ierrors = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;
		stats.fdirmatch -= port->stats.fdirmatch;
		port->stats.fdirmatch = 0;
		stats.fdirmiss -= port->stats.fdirmiss;
		port->stats.fdirmiss = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.ierrors;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
	       " RX-total: %-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
	       " TX-total: %-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int) (fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if (port->port_status != RTE_PORT_STARTED)
			return 0;
	}

	/* No port is not started */
	return 1;
}
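
/*
 * Start a given port, or all ports when pid is out of the probed range
 * (e.g. RTE_PORT_ALL): (re)configure the device and its RX/TX queues if
 * needed, start it, and finally check the link status.
 * Returns 0 on success, -1 on failure.
 */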
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = 0;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %d)\n", pi,
			       rte_eth_dev_socket_id(pi));
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
						       "no mempool allocated "
						       "on socket %d\n",
						       rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				}
				else
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf),
						mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to start port, set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
				       "stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
	else
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}
	printf("Stopping ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status)
		check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	printf("Closing ports...\n");

	for (pi = 0; pi < nb_ports; pi++) {
		if (pid < nb_ports && pid != pi)
			continue;

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d can not be set into closed\n", pi);
	}

	printf("Done\n");
}
int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	for (pi = 0; pi < nb_ports; pi++) {
		port = &ports[pi];
		if (port->port_status != RTE_PORT_STOPPED)
			return 0;
	}

	return 1;
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++) {
		printf("Stopping port %d...", pt_id);
		fflush(stdout);
		rte_eth_dev_close(pt_id);
		printf(" done\n");
	}
	printf("Bye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
					       "Mbps - %s\n", (uint8_t) portid,
					       (unsigned) link.link_speed,
					       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					       ("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
					       (uint8_t) portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
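
/*
 * Program the TX queue to stats-counter mappings requested on the command
 * line into the device registers of the given port; returns the driver
 * diagnostic code, 0 on success. An RX variant follows.
 */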
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
				 "set_tx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		}
		else
			rte_exit(EXIT_FAILURE,
				 "set_rx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	for (pid = 0; pid < nb_ports; pid++) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}
		port->rx_conf.rx_thresh = rx_thresh;
		port->rx_conf.rx_free_thresh = rx_free_thresh;
		port->rx_conf.rx_drop_en = rx_drop_en;
		port->tx_conf.tx_thresh = tx_thresh;
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
		port->tx_conf.tx_free_thresh = tx_free_thresh;
		port->tx_conf.txq_flags = txq_flags;

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
	}
}
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
		struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf.enable_default_pool = 0;
		vmdq_rx_conf.default_pool = 0;
		vmdq_rx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf.nb_queue_pools =
			(dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
		for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
			vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf.dcb_queue[i] = i;
			vmdq_tx_conf.dcb_queue[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_VMDQ_DCB_TX;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
				  sizeof(struct rte_eth_vmdq_dcb_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
				  sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
	}
	else {
		struct rte_eth_dcb_rx_conf rx_conf;
		struct rte_eth_dcb_tx_conf tx_conf;

		/* queue mapping configuration of DCB RX and TX */
		if (dcb_conf->num_tcs == ETH_4_TCS)
			dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
		else
			dcb_q_mapping = DCB_8_TCS_Q_MAPPING;

		rx_conf.nb_tcs = dcb_conf->num_tcs;
		tx_conf.nb_tcs = dcb_conf->num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf.dcb_queue[i] = i;
			tx_conf.dcb_queue[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_DCB_RX;
		eth_conf->txmode.mq_mode = ETH_DCB_TX;
		if (dcb_conf->pfc_en)
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
		else
			eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

		(void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
				  sizeof(struct rte_eth_dcb_rx_conf)));
		(void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
				  sizeof(struct rte_eth_dcb_tx_conf)));
	}

	return 0;
}
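
/*
 * Switch a port to DCB mode: build the DCB device configuration, apply it
 * to the port, and enable VLAN filtering for all tags of the vlan_tags
 * array.
 */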
int
init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t nb_vlan;
	uint16_t i;

	/* rxq and txq configuration in dcb mode */
	nb_rxq = 128;
	nb_txq = 128;
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_conf);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rte_port->rx_conf.rx_thresh = rx_thresh;
	rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
	rte_port->tx_conf.tx_thresh = tx_thresh;
	rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
	rte_port->tx_conf.tx_free_thresh = tx_free_thresh;

	/* VLAN filter must be on so that the configured tags are matched */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < nb_vlan; i++) {
		rx_vft_set(pid, vlan_tags[i], 1);
	}

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	return 0;
}
#ifdef RTE_EXEC_ENV_BAREMETAL
#define main _main
#endif

int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (rte_pmd_init_all())
		rte_panic("Cannot init PMD\n");

	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
			 "check that "
			 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
			 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
			 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y are set in your "
			 "configuration file\n");

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully "
		       "tested.\n", nb_rxq, nb_txq);

	init_config();
	start_port(RTE_PORT_ALL);

	/* set all ports to promiscuous mode by default */
	for (port_id = 0; port_id < nb_ports; port_id++)
		rte_eth_promiscuous_enable(port_id);

	if (interactive == 1)
		prompt();
	else {
		char c;
		int rc;

		printf("No commandline core given, starting packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		if (rc < 0)
			return 1;
	}

	return 0;
}