4 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_tailq.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
78 uint16_t verbose_level = 0; /**< Silent by default. */
80 /* use master core for command line ? */
81 uint8_t interactive = 0;
84 * NUMA support configuration.
85 * When set, the NUMA support attempts to dispatch the allocation of the
86 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
87 * probed ports among the CPU sockets 0 and 1.
88 * Otherwise, all memory is allocated from CPU socket 0.
90 uint8_t numa_support = 0; /**< No numa support by default */
93 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
96 uint8_t socket_num = UMA_NO_CONFIG;
99 * Record the Ethernet address of peer target ports to which packets are
101 * Must be instantiated with the Ethernet addresses of peer traffic generator
104 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
105 portid_t nb_peer_eth_addrs = 0;
108 * Probed Target Environment.
110 struct rte_port *ports; /**< For all probed ethernet ports. */
111 portid_t nb_ports; /**< Number of probed ethernet ports. */
112 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
113 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
116 * Test Forwarding Configuration.
117 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
118 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
120 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
121 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
122 portid_t nb_cfg_ports; /**< Number of configured ports. */
123 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
125 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
126 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
128 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
129 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
132 * Forwarding engines.
134 struct fwd_engine * fwd_engines[] = {
140 #ifdef RTE_LIBRTE_IEEE1588
141 &ieee1588_fwd_engine,
146 struct fwd_config cur_fwd_config;
147 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
149 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
150 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
151 * specified on command-line. */
154 * Configuration of packet segments used by the "txonly" processing engine.
156 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
157 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
158 TXONLY_DEF_PACKET_LEN,
160 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
162 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
163 uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
165 /* current configuration is in DCB or not; 0 means it is not in DCB mode */
166 uint8_t dcb_config = 0;
168 /* Whether the dcb is in testing status */
169 uint8_t dcb_test = 0;
171 /* DCB on and VT on mapping is default */
172 enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
175 * Configurable number of RX/TX queues.
177 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
178 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
181 * Configurable number of RX/TX ring descriptors.
183 #define RTE_TEST_RX_DESC_DEFAULT 128
184 #define RTE_TEST_TX_DESC_DEFAULT 512
185 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
186 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
189 * Configurable values of RX and TX ring threshold registers.
191 #define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
192 #define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
193 #define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
195 #define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
196 #define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
197 #define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
199 struct rte_eth_thresh rx_thresh = {
200 .pthresh = RX_PTHRESH,
201 .hthresh = RX_HTHRESH,
202 .wthresh = RX_WTHRESH,
205 struct rte_eth_thresh tx_thresh = {
206 .pthresh = TX_PTHRESH,
207 .hthresh = TX_HTHRESH,
208 .wthresh = TX_WTHRESH,
212 * Configurable value of RX free threshold.
214 uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
217 * Configurable value of RX drop enable.
219 uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
222 * Configurable value of TX free threshold.
224 uint16_t tx_free_thresh = 0; /* Use default values. */
227 * Configurable value of TX RS bit threshold.
229 uint16_t tx_rs_thresh = 0; /* Use default values. */
232 * Configurable value of TX queue flags.
234 uint32_t txq_flags = 0; /* No flags set. */
237 * Receive Side Scaling (RSS) configuration.
239 uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
242 * Port topology configuration
244 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
247 * Ethernet device configuration.
249 struct rte_eth_rxmode rx_mode = {
250 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
252 .header_split = 0, /**< Header Split disabled. */
253 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
254 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
255 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
256 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
257 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
258 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
261 struct rte_fdir_conf fdir_conf = {
262 .mode = RTE_FDIR_MODE_NONE,
263 .pballoc = RTE_FDIR_PBALLOC_64K,
264 .status = RTE_FDIR_REPORT_STATUS,
265 .flexbytes_offset = 0x6,
269 static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
271 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
272 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
274 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
275 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
277 uint16_t nb_tx_queue_stats_mappings = 0;
278 uint16_t nb_rx_queue_stats_mappings = 0;
280 /* Forward function declarations */
281 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
282 static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
285 * Check if all the ports are started.
286 * If yes, return positive value. If not, return zero.
288 static int all_ports_started(void);
291 * Setup default configuration.
294 set_default_fwd_lcores_config(void)
300 for (i = 0; i < RTE_MAX_LCORE; i++) {
301 if (! rte_lcore_is_enabled(i))
303 if (i == rte_get_master_lcore())
305 fwd_lcores_cpuids[nb_lc++] = i;
307 nb_lcores = (lcoreid_t) nb_lc;
308 nb_cfg_lcores = nb_lcores;
313 set_def_peer_eth_addrs(void)
317 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
318 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
319 peer_eth_addrs[i].addr_bytes[5] = i;
324 set_default_fwd_ports_config(void)
328 for (pt_id = 0; pt_id < nb_ports; pt_id++)
329 fwd_ports_ids[pt_id] = pt_id;
331 nb_cfg_ports = nb_ports;
332 nb_fwd_ports = nb_ports;
/* Apply the full set of default forwarding settings: lcores, peer
 * addresses, and ports. */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
344 * Configuration initialisation done once at init time.
/* Argument passed to the per-mbuf constructor (testpmd_mbuf_ctor). */
struct mbuf_ctor_arg {
	uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
	uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
};
/* Argument passed to the mbuf-pool constructor (testpmd_mbuf_pool_ctor). */
struct mbuf_pool_ctor_arg {
	uint16_t seg_buf_size; /**< size of data segment in mbuf. */
};
356 testpmd_mbuf_ctor(struct rte_mempool *mp,
359 __attribute__((unused)) unsigned i)
361 struct mbuf_ctor_arg *mb_ctor_arg;
364 mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
365 mb = (struct rte_mbuf *) raw_mbuf;
367 mb->type = RTE_MBUF_PKT;
369 mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
370 mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
371 mb_ctor_arg->seg_buf_offset);
372 mb->buf_len = mb_ctor_arg->seg_buf_size;
373 mb->type = RTE_MBUF_PKT;
375 mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
377 mb->pkt.vlan_macip.data = 0;
378 mb->pkt.hash.rss = 0;
382 testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
385 struct mbuf_pool_ctor_arg *mbp_ctor_arg;
386 struct rte_pktmbuf_pool_private *mbp_priv;
388 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
389 printf("%s(%s) private_data_size %d < %d\n",
390 __func__, mp->name, (int) mp->private_data_size,
391 (int) sizeof(struct rte_pktmbuf_pool_private));
394 mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
395 mbp_priv = (struct rte_pktmbuf_pool_private *)
396 ((char *)mp + sizeof(struct rte_mempool));
397 mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
401 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
402 unsigned int socket_id)
404 char pool_name[RTE_MEMPOOL_NAMESIZE];
405 struct rte_mempool *rte_mp;
406 struct mbuf_pool_ctor_arg mbp_ctor_arg;
407 struct mbuf_ctor_arg mb_ctor_arg;
410 mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
412 mb_ctor_arg.seg_buf_offset =
413 (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
414 mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
415 mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
416 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
417 rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
418 (unsigned) mb_mempool_cache,
419 sizeof(struct rte_pktmbuf_pool_private),
420 testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
421 testpmd_mbuf_ctor, &mb_ctor_arg,
423 if (rte_mp == NULL) {
424 rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
425 "failed\n", socket_id);
433 struct rte_port *port;
434 struct rte_mempool *mbp;
435 unsigned int nb_mbuf_per_pool;
437 uint8_t port_per_socket[MAX_SOCKET];
439 memset(port_per_socket,0,MAX_SOCKET);
440 /* Configuration of logical cores. */
441 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
442 sizeof(struct fwd_lcore *) * nb_lcores,
444 if (fwd_lcores == NULL) {
445 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
446 "failed\n", nb_lcores);
448 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
449 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
450 sizeof(struct fwd_lcore),
452 if (fwd_lcores[lc_id] == NULL) {
453 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
456 fwd_lcores[lc_id]->cpuid_idx = lc_id;
460 * Create pools of mbuf.
461 * If NUMA support is disabled, create a single pool of mbuf in
462 * socket 0 memory by default.
463 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
465 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
466 * nb_txd can be configured at run time.
468 if (param_total_num_mbufs)
469 nb_mbuf_per_pool = param_total_num_mbufs;
471 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
472 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
475 nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
479 if (socket_num == UMA_NO_CONFIG)
480 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
482 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
486 * Records which Mbuf pool to use by each logical core, if needed.
488 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
489 mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
491 mbp = mbuf_pool_find(0);
492 fwd_lcores[lc_id]->mbp = mbp;
495 /* Configuration of Ethernet ports. */
496 ports = rte_zmalloc("testpmd: ports",
497 sizeof(struct rte_port) * nb_ports,
500 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
501 "failed\n", nb_ports);
504 for (pid = 0; pid < nb_ports; pid++) {
506 rte_eth_dev_info_get(pid, &port->dev_info);
509 if (port_numa[pid] != NUMA_NO_CONFIG)
510 port_per_socket[port_numa[pid]]++;
512 uint32_t socket_id = rte_eth_dev_socket_id(pid);
513 port_per_socket[socket_id]++;
517 /* set flag to initialize port/queue */
518 port->need_reconfig = 1;
519 port->need_reconfig_queues = 1;
524 unsigned int nb_mbuf;
526 if (param_total_num_mbufs)
527 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
529 for (i = 0; i < MAX_SOCKET; i++) {
530 nb_mbuf = (nb_mbuf_per_pool *
533 mbuf_pool_create(mbuf_data_size,
538 /* Configuration of packet forwarding streams. */
539 if (init_fwd_streams() < 0)
540 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
544 init_fwd_streams(void)
547 struct rte_port *port;
548 streamid_t sm_id, nb_fwd_streams_new;
550 /* set socket id according to numa or not */
551 for (pid = 0; pid < nb_ports; pid++) {
553 if (nb_rxq > port->dev_info.max_rx_queues) {
554 printf("Fail: nb_rxq(%d) is greater than "
555 "max_rx_queues(%d)\n", nb_rxq,
556 port->dev_info.max_rx_queues);
559 if (nb_txq > port->dev_info.max_tx_queues) {
560 printf("Fail: nb_txq(%d) is greater than "
561 "max_tx_queues(%d)\n", nb_txq,
562 port->dev_info.max_tx_queues);
566 port->socket_id = rte_eth_dev_socket_id(pid);
568 if (socket_num == UMA_NO_CONFIG)
571 port->socket_id = socket_num;
575 nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
576 if (nb_fwd_streams_new == nb_fwd_streams)
579 if (fwd_streams != NULL) {
580 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
581 if (fwd_streams[sm_id] == NULL)
583 rte_free(fwd_streams[sm_id]);
584 fwd_streams[sm_id] = NULL;
586 rte_free(fwd_streams);
591 nb_fwd_streams = nb_fwd_streams_new;
592 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
593 sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
594 if (fwd_streams == NULL)
595 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
596 "failed\n", nb_fwd_streams);
598 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
599 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
600 sizeof(struct fwd_stream), CACHE_LINE_SIZE);
601 if (fwd_streams[sm_id] == NULL)
602 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
609 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
611 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
613 unsigned int total_burst;
614 unsigned int nb_burst;
615 unsigned int burst_stats[3];
616 uint16_t pktnb_stats[3];
618 int burst_percent[3];
621 * First compute the total number of packet bursts and the
622 * two highest numbers of bursts of the same number of packets.
625 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
626 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
627 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
628 nb_burst = pbs->pkt_burst_spread[nb_pkt];
631 total_burst += nb_burst;
632 if (nb_burst > burst_stats[0]) {
633 burst_stats[1] = burst_stats[0];
634 pktnb_stats[1] = pktnb_stats[0];
635 burst_stats[0] = nb_burst;
636 pktnb_stats[0] = nb_pkt;
639 if (total_burst == 0)
641 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
642 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
643 burst_percent[0], (int) pktnb_stats[0]);
644 if (burst_stats[0] == total_burst) {
648 if (burst_stats[0] + burst_stats[1] == total_burst) {
649 printf(" + %d%% of %d pkts]\n",
650 100 - burst_percent[0], pktnb_stats[1]);
653 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
654 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
655 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
656 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
659 printf(" + %d%% of %d pkts + %d%% of others]\n",
660 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
662 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
665 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
667 struct rte_port *port;
670 static const char *fwd_stats_border = "----------------------";
672 port = &ports[port_id];
673 printf("\n %s Forward statistics for port %-2d %s\n",
674 fwd_stats_border, port_id, fwd_stats_border);
676 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
677 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
679 stats->ipackets, stats->ierrors,
680 (uint64_t) (stats->ipackets + stats->ierrors));
682 if (cur_fwd_eng == &csum_fwd_engine)
683 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
684 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
686 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
688 stats->opackets, port->tx_dropped,
689 (uint64_t) (stats->opackets + port->tx_dropped));
691 if (stats->rx_nombuf > 0)
692 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
696 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
698 stats->ipackets, stats->ierrors,
699 (uint64_t) (stats->ipackets + stats->ierrors));
701 if (cur_fwd_eng == &csum_fwd_engine)
702 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
703 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
705 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
707 stats->opackets, port->tx_dropped,
708 (uint64_t) (stats->opackets + port->tx_dropped));
710 if (stats->rx_nombuf > 0)
711 printf(" RX-nombufs:%14"PRIu64"\n", stats->rx_nombuf);
713 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
715 pkt_burst_stats_display("RX",
716 &port->rx_stream->rx_burst_stats);
718 pkt_burst_stats_display("TX",
719 &port->tx_stream->tx_burst_stats);
722 if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
723 printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
727 if (port->rx_queue_stats_mapping_enabled) {
729 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
730 printf(" Stats reg %2d RX-packets:%14"PRIu64
731 " RX-errors:%14"PRIu64
732 " RX-bytes:%14"PRIu64"\n",
733 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
737 if (port->tx_queue_stats_mapping_enabled) {
738 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
739 printf(" Stats reg %2d TX-packets:%14"PRIu64
740 " TX-bytes:%14"PRIu64"\n",
741 i, stats->q_opackets[i], stats->q_obytes[i]);
745 printf(" %s--------------------------------%s\n",
746 fwd_stats_border, fwd_stats_border);
750 fwd_stream_stats_display(streamid_t stream_id)
752 struct fwd_stream *fs;
753 static const char *fwd_top_stats_border = "-------";
755 fs = fwd_streams[stream_id];
756 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
757 (fs->fwd_dropped == 0))
759 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
760 "TX Port=%2d/Queue=%2d %s\n",
761 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
762 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
763 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
764 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
766 /* if checksum mode */
767 if (cur_fwd_eng == &csum_fwd_engine) {
768 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
769 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
772 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
773 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
774 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
779 flush_all_rx_queues(void)
781 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
788 for (j = 0; j < 2; j++) {
789 for (rxp = 0; rxp < nb_ports; rxp++) {
790 for (rxq = 0; rxq < nb_rxq; rxq++) {
792 nb_rx = rte_eth_rx_burst(rxp, rxq,
793 pkts_burst, MAX_PKT_BURST);
794 for (i = 0; i < nb_rx; i++)
795 rte_pktmbuf_free(pkts_burst[i]);
799 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
804 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
806 struct fwd_stream **fsm;
810 fsm = &fwd_streams[fc->stream_idx];
811 nb_fs = fc->stream_nb;
813 for (sm_id = 0; sm_id < nb_fs; sm_id++)
814 (*pkt_fwd)(fsm[sm_id]);
815 } while (! fc->stopped);
819 start_pkt_forward_on_core(void *fwd_arg)
821 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
822 cur_fwd_config.fwd_eng->packet_fwd);
827 * Run the TXONLY packet forwarding engine to send a single burst of packets.
828 * Used to start communication flows in network loopback test configurations.
831 run_one_txonly_burst_on_core(void *fwd_arg)
833 struct fwd_lcore *fwd_lc;
834 struct fwd_lcore tmp_lcore;
836 fwd_lc = (struct fwd_lcore *) fwd_arg;
838 tmp_lcore.stopped = 1;
839 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
844 * Launch packet forwarding:
845 * - Setup per-port forwarding context.
846 * - launch logical cores with their forwarding configuration.
849 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
851 port_fwd_begin_t port_fwd_begin;
856 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
857 if (port_fwd_begin != NULL) {
858 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
859 (*port_fwd_begin)(fwd_ports_ids[i]);
861 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
862 lc_id = fwd_lcores_cpuids[i];
863 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
864 fwd_lcores[i]->stopped = 0;
865 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
866 fwd_lcores[i], lc_id);
868 printf("launch lcore %u failed - diag=%d\n",
875 * Launch packet forwarding configuration.
878 start_packet_forwarding(int with_tx_first)
880 port_fwd_begin_t port_fwd_begin;
881 port_fwd_end_t port_fwd_end;
882 struct rte_port *port;
887 if (all_ports_started() == 0) {
888 printf("Not all ports were started\n");
891 if (test_done == 0) {
892 printf("Packet forwarding already started\n");
895 if((dcb_test) && (nb_fwd_lcores == 1)) {
896 printf("In DCB mode,the nb forwarding cores should be larger than 1.\n");
900 flush_all_rx_queues();
902 rxtx_config_display();
904 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
905 pt_id = fwd_ports_ids[i];
906 port = &ports[pt_id];
907 rte_eth_stats_get(pt_id, &port->stats);
908 port->tx_dropped = 0;
910 map_port_queue_stats_mapping_registers(pt_id, port);
912 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
913 fwd_streams[sm_id]->rx_packets = 0;
914 fwd_streams[sm_id]->tx_packets = 0;
915 fwd_streams[sm_id]->fwd_dropped = 0;
916 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
917 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
919 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
920 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
921 sizeof(fwd_streams[sm_id]->rx_burst_stats));
922 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
923 sizeof(fwd_streams[sm_id]->tx_burst_stats));
925 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
926 fwd_streams[sm_id]->core_cycles = 0;
930 port_fwd_begin = tx_only_engine.port_fwd_begin;
931 if (port_fwd_begin != NULL) {
932 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
933 (*port_fwd_begin)(fwd_ports_ids[i]);
935 launch_packet_forwarding(run_one_txonly_burst_on_core);
936 rte_eal_mp_wait_lcore();
937 port_fwd_end = tx_only_engine.port_fwd_end;
938 if (port_fwd_end != NULL) {
939 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
940 (*port_fwd_end)(fwd_ports_ids[i]);
943 launch_packet_forwarding(start_pkt_forward_on_core);
947 stop_packet_forwarding(void)
949 struct rte_eth_stats stats;
950 struct rte_port *port;
951 port_fwd_end_t port_fwd_end;
958 uint64_t total_rx_dropped;
959 uint64_t total_tx_dropped;
960 uint64_t total_rx_nombuf;
962 uint64_t rx_bad_ip_csum;
963 uint64_t rx_bad_l4_csum;
964 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
967 static const char *acc_stats_border = "+++++++++++++++";
969 if (all_ports_started() == 0) {
970 printf("Not all ports were started\n");
974 printf("Packet forwarding not started\n");
977 printf("Telling cores to stop...");
978 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
979 fwd_lcores[lc_id]->stopped = 1;
980 printf("\nWaiting for lcores to finish...\n");
981 rte_eal_mp_wait_lcore();
982 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
983 if (port_fwd_end != NULL) {
984 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
985 pt_id = fwd_ports_ids[i];
986 (*port_fwd_end)(pt_id);
989 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
992 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
993 if (cur_fwd_config.nb_fwd_streams >
994 cur_fwd_config.nb_fwd_ports) {
995 fwd_stream_stats_display(sm_id);
996 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
997 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
999 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1001 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1004 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1005 tx_dropped = (uint64_t) (tx_dropped +
1006 fwd_streams[sm_id]->fwd_dropped);
1007 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1010 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1011 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1012 fwd_streams[sm_id]->rx_bad_ip_csum);
1013 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1017 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1018 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1019 fwd_streams[sm_id]->rx_bad_l4_csum);
1020 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1023 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1024 fwd_cycles = (uint64_t) (fwd_cycles +
1025 fwd_streams[sm_id]->core_cycles);
1030 total_rx_dropped = 0;
1031 total_tx_dropped = 0;
1032 total_rx_nombuf = 0;
1033 for (i = 0; i < ((cur_fwd_config.nb_fwd_ports + 1) & ~0x1); i++) {
1034 pt_id = fwd_ports_ids[i];
1036 port = &ports[pt_id];
1037 rte_eth_stats_get(pt_id, &stats);
1038 stats.ipackets -= port->stats.ipackets;
1039 port->stats.ipackets = 0;
1040 stats.opackets -= port->stats.opackets;
1041 port->stats.opackets = 0;
1042 stats.ibytes -= port->stats.ibytes;
1043 port->stats.ibytes = 0;
1044 stats.obytes -= port->stats.obytes;
1045 port->stats.obytes = 0;
1046 stats.ierrors -= port->stats.ierrors;
1047 port->stats.ierrors = 0;
1048 stats.oerrors -= port->stats.oerrors;
1049 port->stats.oerrors = 0;
1050 stats.rx_nombuf -= port->stats.rx_nombuf;
1051 port->stats.rx_nombuf = 0;
1052 stats.fdirmatch -= port->stats.fdirmatch;
1053 port->stats.rx_nombuf = 0;
1054 stats.fdirmiss -= port->stats.fdirmiss;
1055 port->stats.rx_nombuf = 0;
1057 total_recv += stats.ipackets;
1058 total_xmit += stats.opackets;
1059 total_rx_dropped += stats.ierrors;
1060 total_tx_dropped += port->tx_dropped;
1061 total_rx_nombuf += stats.rx_nombuf;
1063 fwd_port_stats_display(pt_id, &stats);
1065 printf("\n %s Accumulated forward statistics for all ports"
1067 acc_stats_border, acc_stats_border);
1068 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1070 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1072 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1073 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1074 if (total_rx_nombuf > 0)
1075 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1076 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1078 acc_stats_border, acc_stats_border);
1079 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1081 printf("\n CPU cycles/packet=%u (total cycles="
1082 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1083 (unsigned int)(fwd_cycles / total_recv),
1084 fwd_cycles, total_recv);
1086 printf("\nDone.\n");
1091 all_ports_started(void)
1094 struct rte_port *port;
1096 for (pi = 0; pi < nb_ports; pi++) {
1098 /* Check if there is a port which is not started */
1099 if (port->port_status != RTE_PORT_STARTED)
1103 /* No port is not started */
1108 start_port(portid_t pid)
1110 int diag, need_check_link_status = 0;
1113 struct rte_port *port;
1115 if (test_done == 0) {
1116 printf("Please stop forwarding first\n");
1120 if (init_fwd_streams() < 0) {
1121 printf("Fail from init_fwd_streams()\n");
1127 for (pi = 0; pi < nb_ports; pi++) {
1128 if (pid < nb_ports && pid != pi)
1132 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1133 RTE_PORT_HANDLING) == 0) {
1134 printf("Port %d is now not stopped\n", pi);
1138 if (port->need_reconfig > 0) {
1139 port->need_reconfig = 0;
1141 printf("Configuring Port %d (socket %d)\n", pi,
1142 rte_eth_dev_socket_id(pi));
1143 /* configure port */
1144 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1147 if (rte_atomic16_cmpset(&(port->port_status),
1148 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1149 printf("Port %d can not be set back "
1150 "to stopped\n", pi);
1151 printf("Fail to configure port %d\n", pi);
1152 /* try to reconfigure port next time */
1153 port->need_reconfig = 1;
1157 if (port->need_reconfig_queues > 0) {
1158 port->need_reconfig_queues = 0;
1159 /* setup tx queues */
1160 for (qi = 0; qi < nb_txq; qi++) {
1161 if ((numa_support) &&
1162 (txring_numa[pi] != NUMA_NO_CONFIG))
1163 diag = rte_eth_tx_queue_setup(pi, qi,
1164 nb_txd,txring_numa[pi],
1167 diag = rte_eth_tx_queue_setup(pi, qi,
1168 nb_txd,port->socket_id,
1174 /* Fail to setup tx queue, return */
1175 if (rte_atomic16_cmpset(&(port->port_status),
1177 RTE_PORT_STOPPED) == 0)
1178 printf("Port %d can not be set back "
1179 "to stopped\n", pi);
1180 printf("Fail to configure port %d tx queues\n", pi);
1181 /* try to reconfigure queues next time */
1182 port->need_reconfig_queues = 1;
1185 /* setup rx queues */
1186 for (qi = 0; qi < nb_rxq; qi++) {
1187 if ((numa_support) &&
1188 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1189 struct rte_mempool * mp =
1190 mbuf_pool_find(rxring_numa[pi]);
1192 printf("Failed to setup RX queue:"
1193 "No mempool allocation"
1194 "on the socket %d\n",
1199 diag = rte_eth_rx_queue_setup(pi, qi,
1200 nb_rxd,rxring_numa[pi],
1201 &(port->rx_conf),mp);
1204 diag = rte_eth_rx_queue_setup(pi, qi,
1205 nb_rxd,port->socket_id,
1207 mbuf_pool_find(port->socket_id));
1213 /* Fail to setup rx queue, return */
1214 if (rte_atomic16_cmpset(&(port->port_status),
1216 RTE_PORT_STOPPED) == 0)
1217 printf("Port %d can not be set back "
1218 "to stopped\n", pi);
1219 printf("Fail to configure port %d rx queues\n", pi);
1220 /* try to reconfigure queues next time */
1221 port->need_reconfig_queues = 1;
1226 if (rte_eth_dev_start(pi) < 0) {
1227 printf("Fail to start port %d\n", pi);
1229 /* Fail to setup rx queue, return */
1230 if (rte_atomic16_cmpset(&(port->port_status),
1231 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1232 printf("Port %d can not be set back to "
1237 if (rte_atomic16_cmpset(&(port->port_status),
1238 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1239 printf("Port %d can not be set into started\n", pi);
1241 /* at least one port started, need checking link status */
1242 need_check_link_status = 1;
1245 if (need_check_link_status)
1246 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
1248 printf("Please stop the ports first\n");
/*
 * stop_port() - stop one port, or all ports when pid does not select a
 * specific one (for pid >= nb_ports the "pid < nb_ports && pid != pi"
 * skip never fires, so every port is processed).
 * Uses atomic port_status transitions (STARTED -> HANDLING -> STOPPED)
 * so concurrent management paths cannot operate on the same port.
 * NOTE(review): several intermediate lines are elided from this view;
 * comments describe only the visible logic.
 */
1254 stop_port(portid_t pid)
1257 struct rte_port *port;
1258 int need_check_link_status = 0;
/* forwarding must be stopped before any port can be stopped */
1260 if (test_done == 0) {
1261 printf("Please stop forwarding first\n");
1268 printf("Stopping ports...\n");
1270 for (pi = 0; pi < nb_ports; pi++) {
/* when a specific port was requested, skip all the others */
1271 if (pid < nb_ports && pid != pi)
/* claim the port: STARTED -> HANDLING; ports not started are skipped */
1275 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1276 RTE_PORT_HANDLING) == 0)
1279 rte_eth_dev_stop(pi);
/* release the port: HANDLING -> STOPPED */
1281 if (rte_atomic16_cmpset(&(port->port_status),
1282 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1283 printf("Port %d can not be set into stopped\n", pi);
1284 need_check_link_status = 1;
/* report link status once at least one port changed state */
1286 if (need_check_link_status)
1287 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
/*
 * close_port() - close one port, or all ports when pid does not select a
 * specific one (same wildcard convention as stop_port()).
 * Only ports currently in RTE_PORT_STOPPED are closed; the atomic
 * STOPPED -> HANDLING -> CLOSED transitions guard against concurrent
 * management of the same port.
 * NOTE(review): several intermediate lines are elided from this view;
 * comments describe only the visible logic.
 */
1293 close_port(portid_t pid)
1296 struct rte_port *port;
/* forwarding must be stopped before any port can be closed */
1298 if (test_done == 0) {
1299 printf("Please stop forwarding first\n");
1303 printf("Closing ports...\n");
1305 for (pi = 0; pi < nb_ports; pi++) {
/* when a specific port was requested, skip all the others */
1306 if (pid < nb_ports && pid != pi)
/* claim the port: STOPPED -> HANDLING; refuse ports not stopped */
1310 if (rte_atomic16_cmpset(&(port->port_status),
1311 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1312 printf("Port %d is now not stopped\n", pi);
1316 rte_eth_dev_close(pi);
/* release the port: HANDLING -> CLOSED.
 * BUGFIX: the message said "stopped" (copy-paste from stop_port())
 * although the target state here is RTE_PORT_CLOSED. */
1318 if (rte_atomic16_cmpset(&(port->port_status),
1319 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1320 printf("Port %d can not be set into closed\n", pi);
/*
 * all_ports_stopped() - scan every probed port; the visible check fails
 * as soon as one port is not in RTE_PORT_STOPPED state.
 * NOTE(review): the return statements are elided from this view —
 * presumably 0 on the failing check and 1 after the loop; confirm
 * against the full file.
 */
1327 all_ports_stopped(void)
1330 struct rte_port *port;
1332 for (pi = 0; pi < nb_ports; pi++) {
1334 if (port->port_status != RTE_PORT_STOPPED)
/*
 * NOTE(review): fragment of a function whose signature is outside this
 * view (presumably the test-exit path): announces and closes every
 * probed port in turn.
 */
1346 for (pt_id = 0; pt_id < nb_ports; pt_id++) {
1347 printf("Stopping port %d...", pt_id);
1349 rte_eth_dev_close(pt_id);
/* Signature of a parameterless PMD test-menu command handler. */
1355 typedef void (*cmd_func_t)(void);
/* One entry of the PMD test menu: a command name bound to its handler. */
1356 struct pmd_test_command {
1357 const char *cmd_name;
1358 cmd_func_t cmd_func;
/* number of entries in pmd_test_menu[] (array defined elsewhere in the file) */
1361 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1363 /* Poll the link status of all ports for up to 9s, then print the final status of each */
/*
 * check_all_ports_link_status() - poll the link state of every port
 * selected by port_mask (bit per port id, ports >= port_num ignored).
 * Polls with rte_eth_link_get_nowait() every CHECK_INTERVAL ms until
 * either all selected links are up or MAX_CHECK_TIME intervals elapsed,
 * then performs one final pass with print_flag set to report each port.
 * NOTE(review): several intermediate lines (loop-variable resets,
 * continue/break statements, closing braces) are elided from this view.
 */
1365 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1367 #define CHECK_INTERVAL 100 /* 100ms */
1368 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1369 uint8_t portid, count, all_ports_up, print_flag = 0;
1370 struct rte_eth_link link;
1372 printf("Checking link statuses...\n");
1374 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1376 for (portid = 0; portid < port_num; portid++) {
/* only ports selected by the caller's mask are checked */
1377 if ((port_mask & (1 << portid)) == 0)
1379 memset(&link, 0, sizeof(link));
/* non-blocking query: do not wait for link negotiation */
1380 rte_eth_link_get_nowait(portid, &link);
/* print link status if flag set */
1381 /* print link status if flag set */
1382 if (print_flag == 1) {
1383 if (link.link_status)
1384 printf("Port %d Link Up - speed %u "
1385 "Mbps - %s\n", (uint8_t)portid,
1386 (unsigned)link.link_speed,
1387 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* BUGFIX: "half-duplex\n" carried a stray newline — the format
 * string above already ends with '\n', so half-duplex ports
 * printed an extra blank line. */
1388 ("full-duplex") : ("half-duplex"));
1390 printf("Port %d Link Down\n",
1394 /* clear all_ports_up flag if any link down */
1395 if (link.link_status == 0) {
1400 /* after finally printing all link status, get out */
1401 if (print_flag == 1)
/* not all links up yet and not in the final pass: wait and retry */
1404 if (all_ports_up == 0) {
1406 rte_delay_ms(CHECK_INTERVAL);
1409 /* set the print_flag if all ports up or timeout */
1410 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * set_tx_queue_stats_mapping_registers() - program the NIC so that each
 * TX queue listed for this port in the global tx_queue_stats_mappings[]
 * table feeds the requested per-queue stats counter, then flag the port
 * as having TX queue-stats mapping enabled.
 * NOTE(review): error handling between the setup call and the enable
 * flag (and the return statements) is elided from this view.
 */
1417 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1421 uint8_t mapping_found = 0;
1423 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
/* apply only entries for this port whose queue id is in range */
1424 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1425 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1426 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1427 tx_queue_stats_mappings[i].queue_id,
1428 tx_queue_stats_mappings[i].stats_counter_id);
1435 port->tx_queue_stats_mapping_enabled = 1;
/*
 * set_rx_queue_stats_mapping_registers() - RX counterpart of
 * set_tx_queue_stats_mapping_registers(): program the NIC so that each
 * RX queue listed for this port in rx_queue_stats_mappings[] feeds the
 * requested per-queue stats counter, then flag the port as having RX
 * queue-stats mapping enabled.
 * NOTE(review): error handling and return statements are elided from
 * this view.
 */
1440 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1444 uint8_t mapping_found = 0;
1446 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
/* apply only entries for this port whose queue id is in range */
1447 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1448 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1449 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1450 rx_queue_stats_mappings[i].queue_id,
1451 rx_queue_stats_mappings[i].stats_counter_id);
1458 port->rx_queue_stats_mapping_enabled = 1;
/*
 * map_port_queue_stats_mapping_registers() - program both the TX and RX
 * queue -> stats-counter mappings for one port.  -ENOTSUP from the NIC
 * is tolerated: the feature is simply flagged off for that direction and
 * execution continues.  The other visible failure path aborts the whole
 * application via rte_exit().
 * NOTE(review): the conditions selecting the rte_exit() branches are
 * elided from this view.
 */
1463 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1467 diag = set_tx_queue_stats_mapping_registers(pi, port);
1469 if (diag == -ENOTSUP) {
/* NIC cannot map TX queue stats: disable the feature, keep going */
1470 port->tx_queue_stats_mapping_enabled = 0;
1471 printf("TX queue stats mapping not supported port id=%d\n", pi);
/* any other TX mapping failure is fatal */
1474 rte_exit(EXIT_FAILURE,
1475 "set_tx_queue_stats_mapping_registers "
1476 "failed for port id=%d diag=%d\n",
1480 diag = set_rx_queue_stats_mapping_registers(pi, port);
1482 if (diag == -ENOTSUP) {
/* NIC cannot map RX queue stats: disable the feature, keep going */
1483 port->rx_queue_stats_mapping_enabled = 0;
1484 printf("RX queue stats mapping not supported port id=%d\n", pi);
/* any other RX mapping failure is fatal */
1487 rte_exit(EXIT_FAILURE,
1488 "set_rx_queue_stats_mapping_registers "
1489 "failed for port id=%d diag=%d\n",
/*
 * init_port_config() - fill each probed port's device configuration from
 * the testpmd globals (rx_mode, fdir_conf, RSS settings, queue
 * thresholds), fetch its MAC address, and program its queue-stats
 * mapping registers.
 */
1495 init_port_config(void)
1498 struct rte_port *port;
1500 for (pid = 0; pid < nb_ports; pid++) {
1502 port->dev_conf.rxmode = rx_mode;
1503 port->dev_conf.fdir_conf = fdir_conf;
/* first arm: default RSS key with the configured hash functions
 * (the selecting condition is elided from this view — presumably
 * nb_rxq > 1; confirm against the full file) */
1505 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1506 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
/* second arm: RSS disabled (no hash functions) */
1508 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1509 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* per-queue threshold/config defaults from the global settings */
1511 port->rx_conf.rx_thresh = rx_thresh;
1512 port->rx_conf.rx_free_thresh = rx_free_thresh;
1513 port->rx_conf.rx_drop_en = rx_drop_en;
1514 port->tx_conf.tx_thresh = tx_thresh;
1515 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1516 port->tx_conf.tx_free_thresh = tx_free_thresh;
1517 port->tx_conf.txq_flags = txq_flags;
1519 rte_eth_macaddr_get(pid, &port->eth_addr);
1521 map_port_queue_stats_mapping_registers(pid, port);
/* VLAN ids mapped onto VMDQ+DCB pools by get_eth_dcb_conf() */
1525 const uint16_t vlan_tags[] = {
1526 0, 1, 2, 3, 4, 5, 6, 7,
1527 8, 9, 10, 11, 12, 13, 14, 15,
1528 16, 17, 18, 19, 20, 21, 22, 23,
1529 24, 25, 26, 27, 28, 29, 30, 31
/*
 * get_eth_dcb_conf() - build an rte_eth_conf for DCB operation from the
 * requested dcb_config: one branch for DCB combined with virtualization
 * (VMDQ+DCB), the other for plain DCB.  Both branches map the
 * ETH_DCB_NUM_USER_PRIORITIES user priorities 1:1 onto DCB queues and
 * set dcb_capability_en according to whether priority flow control
 * (pfc_en) was requested.
 * NOTE(review): several intermediate lines (else keywords, closing
 * braces, return) are elided from this view.
 */
1533 get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
1538 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1539 * given above, and the number of traffic classes available for use.
1541 if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
1542 struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
1543 struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
1545 /* VMDQ+DCB RX and TX configurations */
1546 vmdq_rx_conf.enable_default_pool = 0;
1547 vmdq_rx_conf.default_pool = 0;
/* 4 TCs -> 32 pools, otherwise (8 TCs) -> 16 pools, for both RX and TX */
1548 vmdq_rx_conf.nb_queue_pools =
1549 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1550 vmdq_tx_conf.nb_queue_pools =
1551 (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* distribute the vlan_tags[] ids round-robin over the queue pools */
1553 vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1554 for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
1555 vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
1556 vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
/* identity mapping: user priority i -> DCB queue i */
1558 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1559 vmdq_rx_conf.dcb_queue[i] = i;
1560 vmdq_tx_conf.dcb_queue[i] = i;
1563 /*set DCB mode of RX and TX of multiple queues*/
1564 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1565 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
/* advertise PFC support only when priority flow control is requested */
1566 if (dcb_conf->pfc_en)
1567 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1569 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1571 (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
1572 sizeof(struct rte_eth_vmdq_dcb_conf)));
1573 (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
1574 sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
/* plain DCB (no virtualization) branch */
1577 struct rte_eth_dcb_rx_conf rx_conf;
1578 struct rte_eth_dcb_tx_conf tx_conf;
1580 /* queue mapping configuration of DCB RX and TX */
1581 if (dcb_conf->num_tcs == ETH_4_TCS)
1582 dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
1584 dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
1586 rx_conf.nb_tcs = dcb_conf->num_tcs;
1587 tx_conf.nb_tcs = dcb_conf->num_tcs;
/* identity mapping: user priority i -> DCB queue i */
1589 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
1590 rx_conf.dcb_queue[i] = i;
1591 tx_conf.dcb_queue[i] = i;
1593 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
1594 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
/* advertise PFC support only when priority flow control is requested */
1595 if (dcb_conf->pfc_en)
1596 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
1598 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1600 (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
1601 sizeof(struct rte_eth_dcb_rx_conf)));
1602 (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
1603 sizeof(struct rte_eth_dcb_tx_conf)));
/*
 * init_port_dcb_config() - reconfigure one port for DCB operation:
 * build the DCB rte_eth_conf via get_eth_dcb_conf(), copy it into the
 * port's device configuration, restore the queue thresholds, enable HW
 * VLAN filtering, and register every vlan_tags[] id in the port's VLAN
 * filter table.  Finally refresh the MAC address and queue-stats
 * mappings.
 * NOTE(review): several intermediate lines (queue-count setup,
 * error-return checks) are elided from this view.
 */
1610 init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
1612 struct rte_eth_conf port_conf;
1613 struct rte_port *rte_port;
1618 /* rxq and txq configuration in dcb mode */
1621 rx_free_thresh = 64;
1623 memset(&port_conf,0,sizeof(struct rte_eth_conf));
1624 /* Enter DCB configuration status */
1627 nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
1628 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
1629 retval = get_eth_dcb_conf(&port_conf, dcb_conf);
1633 rte_port = &ports[pid];
1634 memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
/* restore the global threshold settings on top of the DCB config */
1636 rte_port->rx_conf.rx_thresh = rx_thresh;
1637 rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
1638 rte_port->tx_conf.tx_thresh = tx_thresh;
1639 rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1640 rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
/* DCB with VMDQ relies on HW VLAN filtering of the configured tags */
1642 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1643 for (i = 0; i < nb_vlan; i++){
1644 rx_vft_set(pid, vlan_tags[i], 1);
1647 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1648 map_port_queue_stats_mapping_registers(pid, rte_port);
1653 #ifdef RTE_EXEC_ENV_BAREMETAL
1658 main(int argc, char** argv)
1663 diag = rte_eal_init(argc, argv);
1665 rte_panic("Cannot init EAL\n");
1667 if (rte_pmd_init_all())
1668 rte_panic("Cannot init PMD\n");
1670 if (rte_eal_pci_probe())
1671 rte_panic("Cannot probe PCI\n");
1673 nb_ports = (portid_t) rte_eth_dev_count();
1675 rte_exit(EXIT_FAILURE, "No probed ethernet devices - "
1677 "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
1678 "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
1679 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
1680 "configuration file\n");
1682 set_def_fwd_config();
1684 rte_panic("Empty set of forwarding logical cores - check the "
1685 "core mask supplied in the command parameters\n");
1690 launch_args_parse(argc, argv);
1692 if (nb_rxq > nb_txq)
1693 printf("Warning: nb_rxq=%d enables RSS configuration, "
1694 "but nb_txq=%d will prevent to fully test it.\n",
1698 start_port(RTE_PORT_ALL);
1700 /* set all ports to promiscuous mode by default */
1701 for (port_id = 0; port_id < nb_ports; port_id++)
1702 rte_eth_promiscuous_enable(port_id);
1704 if (interactive == 1)
1710 printf("No commandline core given, start packet forwarding\n");
1711 start_packet_forwarding(0);
1712 printf("Press enter to exit\n");
1713 rc = read(0, &c, 1);