/*-
* BSD LICENSE
- *
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
* are met:
- *
- * * Redistributions of source code must retain the above copyright
+ *
+ * * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <stdarg.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
+#ifdef RTE_LIBRTE_PMD_XENVIRT
+#include <rte_eth_xenvirt.h>
+#endif
#include "testpmd.h"
+#include "mempool_osdep.h"
uint16_t verbose_level = 0; /**< Silent by default. */
/* Use the master core for the command line? */
uint8_t interactive = 0;
+uint8_t auto_start = 0;
/*
* NUMA support configuration.
*/
uint8_t numa_support = 0; /**< No numa support by default */
+/*
+ * In UMA mode, all memory is allocated from socket 0 if --socket-num is
+ * not configured.
+ */
+uint8_t socket_num = UMA_NO_CONFIG;
+
+/*
+ * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
+ */
+uint8_t mp_anon = 0;
+
/*
* Record the Ethernet address of peer target ports to which packets are
* forwarded.
struct fwd_engine * fwd_engines[] = {
&io_fwd_engine,
&mac_fwd_engine,
+ &mac_retry_fwd_engine,
+ &mac_swap_engine,
+ &flow_gen_engine,
&rx_only_engine,
&tx_only_engine,
&csum_fwd_engine,
+ &icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
&ieee1588_fwd_engine,
#endif
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
+uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
+ * specified on command-line. */
/*
* Configuration of packet segments used by the "txonly" processing engine.
uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
-uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
+uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
-/*
- * Ethernet Ports Configuration.
- */
-int promiscuous_on = 1; /**< Ports set in promiscuous mode by default. */
+/* Whether the current configuration is in DCB mode; 0 means it is not */
+uint8_t dcb_config = 0;
+
+/* Whether DCB is in testing status */
+uint8_t dcb_test = 0;
+
+/* DCB-on and VT-on queue mapping is the default */
+enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
/*
* Configurable number of RX/TX queues.
*/
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
-#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
+#define RX_WTHRESH 0 /**< Default value of RX write-back threshold register. */
-#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
+#define TX_PTHRESH 32 /**< Default value of TX prefetch threshold register. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
/*
* Configurable value of RX free threshold.
*/
-uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
+uint16_t rx_free_thresh = 32; /* Refill RX descriptors once every 32 packets.
+    This setting is needed for ixgbe to enable bulk alloc or vector
+    receive functionality. */
+
+/*
+ * Configurable value of RX drop enable.
+ */
+uint8_t rx_drop_en = 0; /* Drop packets when no descriptors for queue. */
/*
* Configurable value of TX free threshold.
*/
uint16_t tx_rs_thresh = 0; /* Use default values. */
+/*
+ * Configurable value of TX queue flags.
+ */
+uint32_t txq_flags = 0; /* No flags set. */
+
/*
* Receive Side Scaling (RSS) configuration.
*/
-uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
+uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
/*
* Port topology configuration
*/
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
+/*
+ * Avoid flushing all the RX streams before starting forwarding.
+ */
+uint8_t no_flush_rx = 0; /* flush by default */
+
+/*
+ * Avoid checking the link status when starting/stopping a port.
+ */
+uint8_t no_link_check = 0; /* check by default */
+
+/*
+ * NIC bypass mode configuration options.
+ */
+#ifdef RTE_NIC_BYPASS
+
+/* The NIC bypass watchdog timeout. */
+uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
+
+#endif
+
/*
* Ethernet device configuration.
*/
.drop_queue = 127,
};
-static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
+volatile int test_done = 1; /* stop packet forwarding when set to 1. */
struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
+static void check_all_ports_link_status(uint8_t port_num, uint32_t port_mask);
+
+/*
+ * Check if all the ports are started.
+ * If yes, return a positive value; if not, return zero.
+ */
+static int all_ports_started(void);
/*
* Setup default configuration.
mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
mb_ctor_arg->seg_buf_offset);
mb->buf_len = mb_ctor_arg->seg_buf_size;
- mb->type = RTE_MBUF_PKT;
mb->ol_flags = 0;
- mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
- mb->pkt.nb_segs = 1;
- mb->pkt.vlan_macip.data = 0;
- mb->pkt.hash.rss = 0;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->tx_offload = 0;
+ mb->vlan_tci = 0;
+ mb->hash.rss = 0;
}
static void
return;
}
mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
- mbp_priv = (struct rte_pktmbuf_pool_private *)
- ((char *)mp + sizeof(struct rte_mempool));
+ mbp_priv = rte_mempool_get_priv(mp);
mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
}
mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
- rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
+
+#ifdef RTE_LIBRTE_PMD_XENVIRT
+ rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
+ (unsigned) mb_mempool_cache,
+ sizeof(struct rte_pktmbuf_pool_private),
+ testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
+ testpmd_mbuf_ctor, &mb_ctor_arg,
+ socket_id, 0);
+
+#else
+ if (mp_anon != 0)
+ rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
+ (unsigned) mb_mempool_cache,
+ sizeof(struct rte_pktmbuf_pool_private),
+ testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
+ testpmd_mbuf_ctor, &mb_ctor_arg,
+ socket_id, 0);
+ else
+ rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
(unsigned) mb_mempool_cache,
sizeof(struct rte_pktmbuf_pool_private),
testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
testpmd_mbuf_ctor, &mb_ctor_arg,
socket_id, 0);
+
+#endif
+
if (rte_mp == NULL) {
- rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u failed\n",
- socket_id);
+ rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
+ "failed\n", socket_id);
+ } else if (verbose_level > 0) {
+ rte_mempool_dump(stdout, rte_mp);
}
}
+/*
+ * Check whether the given socket ID is valid in NUMA mode.
+ * Return 0 if valid, -1 otherwise.
+ */
+static int
+check_socket_id(const unsigned int socket_id)
+{
+ static int warning_once = 0;
+
+ if (socket_id >= MAX_SOCKET) {
+ if (!warning_once && numa_support)
+ printf("Warning: NUMA should be configured manually by"
+ " using --port-numa-config and"
+ " --ring-numa-config parameters along with"
+ " --numa.\n");
+ warning_once = 1;
+ return -1;
+ }
+ return 0;
+}
+
static void
init_config(void)
{
+ portid_t pid;
struct rte_port *port;
struct rte_mempool *mbp;
unsigned int nb_mbuf_per_pool;
- streamid_t sm_id;
lcoreid_t lc_id;
- portid_t pt_id;
+ uint8_t port_per_socket[MAX_SOCKET];
+ memset(port_per_socket, 0, MAX_SOCKET);
/* Configuration of logical cores. */
fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
sizeof(struct fwd_lcore *) * nb_lcores,
CACHE_LINE_SIZE);
if (fwd_lcores == NULL) {
- rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) failed\n",
- nb_lcores);
+ rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
+ "failed\n", nb_lcores);
}
for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
sizeof(struct fwd_lcore),
CACHE_LINE_SIZE);
if (fwd_lcores[lc_id] == NULL) {
- rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) failed\n");
+ rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
+ "failed\n");
}
fwd_lcores[lc_id]->cpuid_idx = lc_id;
}
/*
* Create pools of mbuf.
* If NUMA support is disabled, create a single pool of mbuf in
- * socket 0 memory.
+ * socket 0 memory by default.
* Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
+ *
+ * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
+ * nb_txd can be re-configured at run time.
*/
- nb_mbuf_per_pool = nb_rxd + (nb_lcores * mb_mempool_cache) +
- nb_txd + MAX_PKT_BURST;
+ if (param_total_num_mbufs)
+ nb_mbuf_per_pool = param_total_num_mbufs;
+ else {
+ nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
+ + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
+
+ if (!numa_support)
+ nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
+ }
+
+ if (!numa_support) {
+ if (socket_num == UMA_NO_CONFIG)
+ mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+ else
+ mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
+ socket_num);
+ }
+
+ /* Configuration of Ethernet ports. */
+ ports = rte_zmalloc("testpmd: ports",
+ sizeof(struct rte_port) * nb_ports,
+ CACHE_LINE_SIZE);
+ if (ports == NULL) {
+ rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
+ "failed\n", nb_ports);
+ }
+
+ for (pid = 0; pid < nb_ports; pid++) {
+ port = &ports[pid];
+ rte_eth_dev_info_get(pid, &port->dev_info);
+
+ if (numa_support) {
+ if (port_numa[pid] != NUMA_NO_CONFIG)
+ port_per_socket[port_numa[pid]]++;
+ else {
+ uint32_t socket_id = rte_eth_dev_socket_id(pid);
+
+ /* if socket_id is invalid, set to 0 */
+ if (check_socket_id(socket_id) < 0)
+ socket_id = 0;
+ port_per_socket[socket_id]++;
+ }
+ }
+
+ /* set flag to initialize port/queue */
+ port->need_reconfig = 1;
+ port->need_reconfig_queues = 1;
+ }
+
if (numa_support) {
- nb_mbuf_per_pool = nb_mbuf_per_pool * (nb_ports >> 1);
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 1);
- } else {
- nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+ uint8_t i;
+ unsigned int nb_mbuf;
+
+ if (param_total_num_mbufs)
+ nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
+
+ for (i = 0; i < MAX_SOCKET; i++) {
+ nb_mbuf = (nb_mbuf_per_pool * port_per_socket[i]);
+ if (nb_mbuf)
+ mbuf_pool_create(mbuf_data_size, nb_mbuf, i);
+ }
}
+ init_port_config();
/*
* Records which Mbuf pool to use by each logical core, if needed.
*/
for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
- mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
+ mbp = mbuf_pool_find(
+ rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
+
if (mbp == NULL)
mbp = mbuf_pool_find(0);
fwd_lcores[lc_id]->mbp = mbp;
}
- /* Configuration of Ethernet ports. */
- ports = rte_zmalloc("testpmd: ports",
+ /* Configuration of packet forwarding streams. */
+ if (init_fwd_streams() < 0)
+ rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
+}
+
+
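+/*
+ * Extend the ports[] array for a newly created device, record its
+ * socket ID and flag it so that the device and its queues are
+ * (re)configured on the next start_port() call.
+ */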
+void
+reconfig(portid_t new_port_id, unsigned socket_id)
+{
+ struct rte_port *port;
+
+ /* Reconfiguration of Ethernet ports. */
+ ports = rte_realloc(ports,
sizeof(struct rte_port) * nb_ports,
CACHE_LINE_SIZE);
if (ports == NULL) {
- rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) failed\n",
- nb_ports);
+ rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n",
+ nb_ports);
}
- port = ports;
- for (pt_id = 0; pt_id < nb_ports; pt_id++, port++) {
- rte_eth_dev_info_get(pt_id, &port->dev_info);
+
+ port = &ports[new_port_id];
+ rte_eth_dev_info_get(new_port_id, &port->dev_info);
+
+ /* set flag to initialize port/queue */
+ port->need_reconfig = 1;
+ port->need_reconfig_queues = 1;
+ port->socket_id = socket_id;
+
+ init_port_config();
+}
+
+
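+/*
+ * Assign a socket ID to every port (from --port-numa-config, from the
+ * device itself, or from --socket-num) and (re)allocate one forwarding
+ * stream per (port, RX queue) pair.
+ * Return 0 on success, -1 if nb_rxq or nb_txq exceeds a device maximum.
+ */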
+int
+init_fwd_streams(void)
+{
+ portid_t pid;
+ struct rte_port *port;
+ streamid_t sm_id, nb_fwd_streams_new;
+
+ /* set socket id according to numa or not */
+ for (pid = 0; pid < nb_ports; pid++) {
+ port = &ports[pid];
if (nb_rxq > port->dev_info.max_rx_queues) {
- rte_exit(EXIT_FAILURE, "Port %d: max RX queues %d < nb_rxq %d\n",
- (int) pt_id,
- (int) port->dev_info.max_rx_queues,
- (int) nb_rxq);
+ printf("Fail: nb_rxq(%d) is greater than "
+ "max_rx_queues(%d)\n", nb_rxq,
+ port->dev_info.max_rx_queues);
+ return -1;
}
if (nb_txq > port->dev_info.max_tx_queues) {
- rte_exit(EXIT_FAILURE, "Port %d: max TX queues %d < nb_txq %d\n",
- (int) pt_id,
- (int) port->dev_info.max_tx_queues,
- (int) nb_txq);
+ printf("Fail: nb_txq(%d) is greater than "
+ "max_tx_queues(%d)\n", nb_txq,
+ port->dev_info.max_tx_queues);
+ return -1;
+ }
+ if (numa_support) {
+ if (port_numa[pid] != NUMA_NO_CONFIG)
+ port->socket_id = port_numa[pid];
+ else {
+ port->socket_id = rte_eth_dev_socket_id(pid);
+
+ /* if socket_id is invalid, set to 0 */
+ if (check_socket_id(port->socket_id) < 0)
+ port->socket_id = 0;
+ }
+ }
+ else {
+ if (socket_num == UMA_NO_CONFIG)
+ port->socket_id = 0;
+ else
+ port->socket_id = socket_num;
}
+ }
- if (numa_support)
- port->socket_id = (pt_id < (nb_ports >> 1)) ? 0 : 1;
- else
- port->socket_id = 0;
+ nb_fwd_streams_new = (streamid_t)(nb_ports * nb_rxq);
+ if (nb_fwd_streams_new == nb_fwd_streams)
+ return 0;
+ /* clear the old */
+ if (fwd_streams != NULL) {
+ for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
+ if (fwd_streams[sm_id] == NULL)
+ continue;
+ rte_free(fwd_streams[sm_id]);
+ fwd_streams[sm_id] = NULL;
+ }
+ rte_free(fwd_streams);
+ fwd_streams = NULL;
}
- /* Configuration of packet forwarding streams. */
- nb_fwd_streams = (streamid_t) (nb_ports * nb_rxq);
+ /* init new */
+ nb_fwd_streams = nb_fwd_streams_new;
fwd_streams = rte_zmalloc("testpmd: fwd_streams",
- sizeof(struct fwd_stream *) * nb_fwd_streams,
- CACHE_LINE_SIZE);
- if (fwd_streams == NULL) {
- rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) failed\n",
- nb_fwd_streams);
- }
+ sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
+ if (fwd_streams == NULL)
+ rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
+ "failed\n", nb_fwd_streams);
+
for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
- sizeof(struct fwd_stream),
- CACHE_LINE_SIZE);
- if (fwd_streams[sm_id] == NULL) {
- rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream) failed\n");
- }
+ sizeof(struct fwd_stream), CACHE_LINE_SIZE);
+ if (fwd_streams[sm_id] == NULL)
+ rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
+ " failed\n");
}
+
+ return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
struct rte_port *port;
+ uint8_t i;
static const char *fwd_stats_border = "----------------------";
port = &ports[port_id];
printf("\n %s Forward statistics for port %-2d %s\n",
- fwd_stats_border, port_id, fwd_stats_border);
- printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
- "%-"PRIu64"\n",
- stats->ipackets, stats->ierrors,
- (uint64_t) (stats->ipackets + stats->ierrors));
+ fwd_stats_border, port_id, fwd_stats_border);
+
+ if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
+ printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
+ "%-"PRIu64"\n",
+ stats->ipackets, stats->imissed,
+ (uint64_t) (stats->ipackets + stats->imissed));
+
+ if (cur_fwd_eng == &csum_fwd_engine)
+ printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
+ port->rx_bad_ip_csum, port->rx_bad_l4_csum);
+ if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
+ printf(" RX-badcrc: %-14"PRIu64" RX-badlen: %-14"PRIu64
+ "RX-error: %-"PRIu64"\n",
+ stats->ibadcrc, stats->ibadlen, stats->ierrors);
+ printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
+ }
- if (cur_fwd_eng == &csum_fwd_engine)
- printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
- port->rx_bad_ip_csum, port->rx_bad_l4_csum);
+ printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
+ "%-"PRIu64"\n",
+ stats->opackets, port->tx_dropped,
+ (uint64_t) (stats->opackets + port->tx_dropped));
+ }
+ else {
+ printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
+ "%14"PRIu64"\n",
+ stats->ipackets, stats->imissed,
+ (uint64_t) (stats->ipackets + stats->imissed));
+
+ if (cur_fwd_eng == &csum_fwd_engine)
+ printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
+ port->rx_bad_ip_csum, port->rx_bad_l4_csum);
+ if (((stats->ierrors - stats->imissed) + stats->rx_nombuf) > 0) {
+ printf(" RX-badcrc: %14"PRIu64" RX-badlen: %14"PRIu64
+ " RX-error:%"PRIu64"\n",
+ stats->ibadcrc, stats->ibadlen, stats->ierrors);
+ printf(" RX-nombufs: %14"PRIu64"\n",
+ stats->rx_nombuf);
+ }
- printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
- "%-"PRIu64"\n",
- stats->opackets, port->tx_dropped,
- (uint64_t) (stats->opackets + port->tx_dropped));
+ printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
+ "%14"PRIu64"\n",
+ stats->opackets, port->tx_dropped,
+ (uint64_t) (stats->opackets + port->tx_dropped));
+ }
+
+ /* Display statistics of XON/XOFF pause frames, if any. */
+ if ((stats->tx_pause_xon | stats->rx_pause_xon |
+ stats->tx_pause_xoff | stats->rx_pause_xoff) > 0) {
+ printf(" RX-XOFF: %-14"PRIu64" RX-XON: %-14"PRIu64"\n",
+ stats->rx_pause_xoff, stats->rx_pause_xon);
+ printf(" TX-XOFF: %-14"PRIu64" TX-XON: %-14"PRIu64"\n",
+ stats->tx_pause_xoff, stats->tx_pause_xon);
+ }
- if (stats->rx_nombuf > 0)
- printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
if (port->rx_stream)
- pkt_burst_stats_display("RX", &port->rx_stream->rx_burst_stats);
+ pkt_burst_stats_display("RX",
+ &port->rx_stream->rx_burst_stats);
if (port->tx_stream)
- pkt_burst_stats_display("TX", &port->tx_stream->tx_burst_stats);
+ pkt_burst_stats_display("TX",
+ &port->tx_stream->tx_burst_stats);
#endif
/* stats fdir */
if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
- printf(" Fdirmiss: %-14"PRIu64" Fdirmatch: %-14"PRIu64"\n",
+ printf(" Fdirmiss:%14"PRIu64" Fdirmatch:%14"PRIu64"\n",
stats->fdirmiss,
stats->fdirmatch);
+ if (port->rx_queue_stats_mapping_enabled) {
+ printf("\n");
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ printf(" Stats reg %2d RX-packets:%14"PRIu64
+ " RX-errors:%14"PRIu64
+ " RX-bytes:%14"PRIu64"\n",
+ i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
+ }
+ printf("\n");
+ }
+ if (port->tx_queue_stats_mapping_enabled) {
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ printf(" Stats reg %2d TX-packets:%14"PRIu64
+ " TX-bytes:%14"PRIu64"\n",
+ i, stats->q_opackets[i], stats->q_obytes[i]);
+ }
+ }
+
printf(" %s--------------------------------%s\n",
fwd_stats_border, fwd_stats_border);
}
/* if checksum mode */
if (cur_fwd_eng == &csum_fwd_engine) {
- printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: %-14u\n",
- fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
+ printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
+ "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
}
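+/*
+ * Drain, in two passes, any packets still pending in the RX queues of
+ * the forwarding ports and free them, so that forwarding starts clean.
+ */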
static void
-flush_all_rx_queues(void)
+flush_fwd_rx_queues(void)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
portid_t rxp;
+ portid_t port_id;
queueid_t rxq;
uint16_t nb_rx;
uint16_t i;
uint8_t j;
for (j = 0; j < 2; j++) {
- for (rxp = 0; rxp < nb_ports; rxp++) {
+ for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
for (rxq = 0; rxq < nb_rxq; rxq++) {
+ port_id = fwd_ports_ids[rxp];
do {
- nb_rx = rte_eth_rx_burst(rxp, rxq,
- pkts_burst,
- MAX_PKT_BURST);
+ nb_rx = rte_eth_rx_burst(port_id, rxq,
+ pkts_burst, MAX_PKT_BURST);
for (i = 0; i < nb_rx; i++)
rte_pktmbuf_free(pkts_burst[i]);
} while (nb_rx > 0);
portid_t pt_id;
streamid_t sm_id;
+ if (all_ports_started() == 0) {
+ printf("Not all ports were started\n");
+ return;
+ }
if (test_done == 0) {
printf("Packet forwarding already started\n");
return;
}
+ if (dcb_test) {
+ for (i = 0; i < nb_fwd_ports; i++) {
+ pt_id = fwd_ports_ids[i];
+ port = &ports[pt_id];
+ if (!port->dcb_flag) {
+ printf("In DCB mode, all forwarding ports must "
+ "be configured in this mode.\n");
+ return;
+ }
+ }
+ if (nb_fwd_lcores == 1) {
+ printf("In DCB mode,the nb forwarding cores "
+ "should be larger than 1.\n");
+ return;
+ }
+ }
test_done = 0;
- flush_all_rx_queues();
+
+ if (!no_flush_rx)
+ flush_fwd_rx_queues();
+
fwd_config_setup();
rxtx_config_display();
port = &ports[pt_id];
rte_eth_stats_get(pt_id, &port->stats);
port->tx_dropped = 0;
+
+ map_port_queue_stats_mapping_registers(pt_id, port);
}
for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
fwd_streams[sm_id]->rx_packets = 0;
#endif
static const char *acc_stats_border = "+++++++++++++++";
+ if (all_ports_started() == 0) {
+ printf("Not all ports were started\n");
+ return;
+ }
if (test_done) {
printf("Packet forwarding not started\n");
return;
fwd_streams[sm_id]->fwd_dropped);
ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
- rx_bad_ip_csum = ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
+ rx_bad_ip_csum =
+ ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
fwd_streams[sm_id]->rx_bad_ip_csum);
- ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = rx_bad_ip_csum;
+ ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
+ rx_bad_ip_csum;
- rx_bad_l4_csum = ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
+ rx_bad_l4_csum =
+ ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
fwd_streams[sm_id]->rx_bad_l4_csum);
- ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = rx_bad_l4_csum;
+ ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
+ rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
fwd_cycles = (uint64_t) (fwd_cycles +
total_rx_dropped = 0;
total_tx_dropped = 0;
total_rx_nombuf = 0;
- for (i = 0; i < ((cur_fwd_config.nb_fwd_ports + 1) & ~0x1); i++) {
+ for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
port = &ports[pt_id];
port->stats.ibytes = 0;
stats.obytes -= port->stats.obytes;
port->stats.obytes = 0;
- stats.ierrors -= port->stats.ierrors;
- port->stats.ierrors = 0;
+ stats.imissed -= port->stats.imissed;
+ port->stats.imissed = 0;
stats.oerrors -= port->stats.oerrors;
port->stats.oerrors = 0;
stats.rx_nombuf -= port->stats.rx_nombuf;
total_recv += stats.ipackets;
total_xmit += stats.opackets;
- total_rx_dropped += stats.ierrors;
+ total_rx_dropped += stats.imissed;
total_tx_dropped += port->tx_dropped;
total_rx_nombuf += stats.rx_nombuf;
test_done = 1;
}
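+/* Administratively set the link of the given port up or down. */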
+void
+dev_set_link_up(portid_t pid)
+{
+ if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
+ printf("\nSet link up fail.\n");
+}
+
+void
+dev_set_link_down(portid_t pid)
+{
+ if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
+ printf("\nSet link down fail.\n");
+}
+
+static int
+all_ports_started(void)
+{
+ portid_t pi;
+ struct rte_port *port;
+
+ for (pi = 0; pi < nb_ports; pi++) {
+ port = &ports[pi];
+ /* Check if there is a port which is not started */
+ if (port->port_status != RTE_PORT_STARTED)
+ return 0;
+ }
+
+ /* All ports are started */
+ return 1;
+}
+
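+/*
+ * Configure and start the given port, or every port when pid is out of
+ * range (RTE_PORT_ALL). Ports flagged with need_reconfig or
+ * need_reconfig_queues are (re)configured and get their RX/TX queues
+ * set up before being started.
+ * Return 0 on success, -1 on configuration failure.
+ */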
+int
+start_port(portid_t pid)
+{
+ int diag, need_check_link_status = 0;
+ portid_t pi;
+ queueid_t qi;
+ struct rte_port *port;
+ struct ether_addr mac_addr;
+
+ if (test_done == 0) {
+ printf("Please stop forwarding first\n");
+ return -1;
+ }
+
+ if (init_fwd_streams() < 0) {
+ printf("Fail from init_fwd_streams()\n");
+ return -1;
+ }
+
+ if (dcb_config)
+ dcb_test = 1;
+ for (pi = 0; pi < nb_ports; pi++) {
+ if (pid < nb_ports && pid != pi)
+ continue;
+
+ port = &ports[pi];
+ if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
+ RTE_PORT_HANDLING) == 0) {
+ printf("Port %d is now not stopped\n", pi);
+ continue;
+ }
+
+ if (port->need_reconfig > 0) {
+ port->need_reconfig = 0;
+
+ printf("Configuring Port %d (socket %u)\n", pi,
+ port->socket_id);
+ /* configure port */
+ diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
+ &(port->dev_conf));
+ if (diag != 0) {
+ if (rte_atomic16_cmpset(&(port->port_status),
+ RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
+ printf("Port %d can not be set back "
+ "to stopped\n", pi);
+ printf("Fail to configure port %d\n", pi);
+ /* try to reconfigure port next time */
+ port->need_reconfig = 1;
+ return -1;
+ }
+ }
+ if (port->need_reconfig_queues > 0) {
+ port->need_reconfig_queues = 0;
+ /* setup tx queues */
+ for (qi = 0; qi < nb_txq; qi++) {
+ if ((numa_support) &&
+ (txring_numa[pi] != NUMA_NO_CONFIG))
+ diag = rte_eth_tx_queue_setup(pi, qi,
+ nb_txd, txring_numa[pi],
+ &(port->tx_conf));
+ else
+ diag = rte_eth_tx_queue_setup(pi, qi,
+ nb_txd, port->socket_id,
+ &(port->tx_conf));
+
+ if (diag == 0)
+ continue;
+
+ /* Fail to setup tx queue, return */
+ if (rte_atomic16_cmpset(&(port->port_status),
+ RTE_PORT_HANDLING,
+ RTE_PORT_STOPPED) == 0)
+ printf("Port %d can not be set back "
+ "to stopped\n", pi);
+ printf("Fail to configure port %d tx queues\n", pi);
+ /* try to reconfigure queues next time */
+ port->need_reconfig_queues = 1;
+ return -1;
+ }
+ /* setup rx queues */
+ for (qi = 0; qi < nb_rxq; qi++) {
+ if ((numa_support) &&
+ (rxring_numa[pi] != NUMA_NO_CONFIG)) {
+ struct rte_mempool * mp =
+ mbuf_pool_find(rxring_numa[pi]);
+ if (mp == NULL) {
+ printf("Failed to setup RX queue:"
+ "No mempool allocation"
+ "on the socket %d\n",
+ rxring_numa[pi]);
+ return -1;
+ }
+
+ diag = rte_eth_rx_queue_setup(pi, qi,
+ nb_rxd, rxring_numa[pi],
+ &(port->rx_conf), mp);
+ }
+ else
+ diag = rte_eth_rx_queue_setup(pi, qi,
+ nb_rxd, port->socket_id,
+ &(port->rx_conf),
+ mbuf_pool_find(port->socket_id));
+
+ if (diag == 0)
+ continue;
+
+ /* Fail to setup rx queue, return */
+ if (rte_atomic16_cmpset(&(port->port_status),
+ RTE_PORT_HANDLING,
+ RTE_PORT_STOPPED) == 0)
+ printf("Port %d can not be set back "
+ "to stopped\n", pi);
+ printf("Fail to configure port %d rx queues\n", pi);
+ /* try to reconfigure queues next time */
+ port->need_reconfig_queues = 1;
+ return -1;
+ }
+ }
+ /* start port */
+ if (rte_eth_dev_start(pi) < 0) {
+ printf("Fail to start port %d\n", pi);
+
+ /* Failed to start the port; set its status back to stopped */
+ if (rte_atomic16_cmpset(&(port->port_status),
+ RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
+ printf("Port %d can not be set back to "
+ "stopped\n", pi);
+ continue;
+ }
+
+ if (rte_atomic16_cmpset(&(port->port_status),
+ RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
+ printf("Port %d can not be set into started\n", pi);
+
+ rte_eth_macaddr_get(pi, &mac_addr);
+ printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
+ mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
+ mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
+ mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
+
+ /* at least one port started, need checking link status */
+ need_check_link_status = 1;
+ }
+
+ if (need_check_link_status && !no_link_check)
+ check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
+ else
+ printf("Please stop the ports first\n");
+
+ printf("Done\n");
+ return 0;
+}
+
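+/*
+ * Stop the given port, or every port when pid is out of range, moving
+ * each one from STARTED through HANDLING to STOPPED. The link status
+ * is re-checked afterwards unless link checking is disabled.
+ */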
+void
+stop_port(portid_t pid)
+{
+ portid_t pi;
+ struct rte_port *port;
+ int need_check_link_status = 0;
+
+ if (test_done == 0) {
+ printf("Please stop forwarding first\n");
+ return;
+ }
+ if (dcb_test) {
+ dcb_test = 0;
+ dcb_config = 0;
+ }
+ printf("Stopping ports...\n");
+
+ for (pi = 0; pi < nb_ports; pi++) {
+ if (pid < nb_ports && pid != pi)
+ continue;
+
+ port = &ports[pi];
+ if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
+ RTE_PORT_HANDLING) == 0)
+ continue;
+
+ rte_eth_dev_stop(pi);
+
+ if (rte_atomic16_cmpset(&(port->port_status),
+ RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
+ printf("Port %d can not be set into stopped\n", pi);
+ need_check_link_status = 1;
+ }
+ if (need_check_link_status && !no_link_check)
+ check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
+
+ printf("Done\n");
+}
+
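+/*
+ * Close the given port, or every port when pid is out of range. Only
+ * ports currently in the STOPPED state are closed.
+ */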
+void
+close_port(portid_t pid)
+{
+ portid_t pi;
+ struct rte_port *port;
+
+ if (test_done == 0) {
+ printf("Please stop forwarding first\n");
+ return;
+ }
+
+ printf("Closing ports...\n");
+
+ for (pi = 0; pi < nb_ports; pi++) {
+ if (pid < nb_ports && pid != pi)
+ continue;
+
+ port = &ports[pi];
+ if (rte_atomic16_cmpset(&(port->port_status),
+ RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
+ printf("Port %d is now not stopped\n", pi);
+ continue;
+ }
+
+ rte_eth_dev_close(pi);
+
+ if (rte_atomic16_cmpset(&(port->port_status),
+ RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
+ printf("Port %d can not be set into stopped\n", pi);
+ }
+
+ printf("Done\n");
+}
+
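+/* Return 1 if every port is in the STOPPED state, 0 otherwise. */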
+int
+all_ports_stopped(void)
+{
+ portid_t pi;
+ struct rte_port *port;
+
+ for (pi = 0; pi < nb_ports; pi++) {
+ port = &ports[pi];
+ if (port->port_status != RTE_PORT_STOPPED)
+ return 0;
+ }
+
+ return 1;
+}
+
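+/*
+ * Return 1 if the port is started, 0 if it is not, or -1 if the port
+ * identifier is invalid.
+ */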
+int
+port_is_started(portid_t port_id)
+{
+ if (port_id_is_invalid(port_id))
+ return -1;
+
+ if (ports[port_id].port_status != RTE_PORT_STARTED)
+ return 0;
+
+ return 1;
+}
+
void
pmd_test_exit(void)
{
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
+/* Check the link status of all ports in up to 9 seconds, and print the final status */
static void
-fatal_init_error(const char *func_name, uint8_t port_id, int diag)
+check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint8_t portid, count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+
+ printf("Checking link statuses...\n");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ all_ports_up = 1;
+ for (portid = 0; portid < port_num; portid++) {
+ if ((port_mask & (1 << portid)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ rte_eth_link_get_nowait(portid, &link);
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf("Port %d Link Up - speed %u "
+ "Mbps - %s\n", (uint8_t)portid,
+ (unsigned)link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex\n"));
+ else
+ printf("Port %d Link Down\n",
+ (uint8_t)portid);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == 0) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ }
+ }
+}
+
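+/*
+ * Program the per-queue statistics counter registers of a port from
+ * the queue-stats mapping table given on the command line.
+ * Return 0 on success or the error code of the failing mapping call.
+ */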
+static int
+set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
- rte_panic("%s(port_id=%d) failed - diag=%d\n",
- func_name, port_id, diag);
+ uint16_t i;
+ int diag;
+ uint8_t mapping_found = 0;
+
+ for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
+ if ((tx_queue_stats_mappings[i].port_id == port_id) &&
+ (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
+ diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
+ tx_queue_stats_mappings[i].queue_id,
+ tx_queue_stats_mappings[i].stats_counter_id);
+ if (diag != 0)
+ return diag;
+ mapping_found = 1;
+ }
+ }
+ if (mapping_found)
+ port->tx_queue_stats_mapping_enabled = 1;
+ return 0;
}
-static void
-init_ports(void)
+static int
+set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
- struct rte_eth_link link;
- struct rte_eth_conf port_conf = {
- .intr_conf = {
- .lsc = 0,
- },
- };
- struct rte_eth_rxconf rx_conf;
- struct rte_eth_txconf tx_conf;
- struct rte_port *port;
- unsigned int sock_id;
- portid_t pi;
- queueid_t qi;
+ uint16_t i;
int diag;
+ uint8_t mapping_found = 0;
+
+ for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
+ if ((rx_queue_stats_mappings[i].port_id == port_id) &&
+ (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
+ diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
+ rx_queue_stats_mappings[i].queue_id,
+ rx_queue_stats_mappings[i].stats_counter_id);
+ if (diag != 0)
+ return diag;
+ mapping_found = 1;
+ }
+ }
+ if (mapping_found)
+ port->rx_queue_stats_mapping_enabled = 1;
+ return 0;
+}
- port_conf.rxmode = rx_mode;
- port_conf.fdir_conf = fdir_conf;
+static void
+map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
+{
+ int diag = 0;
- if (nb_rxq > 0) { /* configure RSS */
- port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
- /* use default hash key */
- port_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
- } else
- port_conf.rx_adv_conf.rss_conf.rss_hf = 0;
- rx_conf.rx_thresh = rx_thresh;
- rx_conf.rx_free_thresh = rx_free_thresh;
- tx_conf.tx_thresh = tx_thresh;
- tx_conf.tx_rs_thresh = tx_rs_thresh;
- tx_conf.tx_free_thresh = tx_free_thresh;
+ diag = set_tx_queue_stats_mapping_registers(pi, port);
+ if (diag != 0) {
+ if (diag == -ENOTSUP) {
+ port->tx_queue_stats_mapping_enabled = 0;
+ printf("TX queue stats mapping not supported port id=%d\n", pi);
+ }
+ else
+ rte_exit(EXIT_FAILURE,
+ "set_tx_queue_stats_mapping_registers "
+ "failed for port id=%d diag=%d\n",
+ pi, diag);
+ }
- for (pi = 0; pi < nb_ports; pi++) {
- port = &ports[pi];
- memcpy(&port->dev_conf, &port_conf, sizeof(port_conf));
- sock_id = port->socket_id;
- printf("Initializing port %d... ", pi);
- fflush(stdout);
- diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, &port_conf);
- if (diag != 0) {
- fatal_init_error("rte_eth_dev_configure", pi, diag);
- /* NOT REACHED */
+ diag = set_rx_queue_stats_mapping_registers(pi, port);
+ if (diag != 0) {
+ if (diag == -ENOTSUP) {
+ port->rx_queue_stats_mapping_enabled = 0;
+ printf("RX queue stats mapping not supported port id=%d\n", pi);
}
- rte_eth_macaddr_get(pi, &port->eth_addr);
- for (qi = 0; qi < nb_txq; qi++) {
- diag = rte_eth_tx_queue_setup(pi, qi, nb_txd,
- sock_id,
- &tx_conf);
- if (diag != 0) {
- fatal_init_error("rte_eth_tx_queue_setup",
- pi, diag);
- /* NOT REACHED */
- }
+ else
+ rte_exit(EXIT_FAILURE,
+ "set_rx_queue_stats_mapping_registers "
+ "failed for port id=%d diag=%d\n",
+ pi, diag);
+ }
+}
+
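+/*
+ * Apply the global RX/TX mode, threshold and flow director settings to
+ * every port, select the RSS or non-RSS multi-queue RX mode, and map
+ * the queue statistics registers.
+ */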
+void
+init_port_config(void)
+{
+ portid_t pid;
+ struct rte_port *port;
+
+ for (pid = 0; pid < nb_ports; pid++) {
+ port = &ports[pid];
+ port->dev_conf.rxmode = rx_mode;
+ port->dev_conf.fdir_conf = fdir_conf;
+ if (nb_rxq > 1) {
+ port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
+ port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
+ } else {
+ port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
+ port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
}
- for (qi = 0; qi < nb_rxq; qi++) {
- diag = rte_eth_rx_queue_setup(pi, qi, nb_rxd, sock_id,
- &rx_conf,
- mbuf_pool_find(sock_id));
- if (diag != 0) {
- fatal_init_error("rte_eth_rx_queue_setup",
- pi , diag);
- /* NOT REACHED */
- }
+
+ /* In SR-IOV mode, RSS mode is not available */
+ if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
+ if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
+ port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ else
+ port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
}
- /* Start device */
- diag = rte_eth_dev_start(pi);
- if (diag != 0) {
- fatal_init_error("rte_eth_dev_start", pi, diag);
- /* NOT REACHED */
+ port->rx_conf.rx_thresh = rx_thresh;
+ port->rx_conf.rx_free_thresh = rx_free_thresh;
+ port->rx_conf.rx_drop_en = rx_drop_en;
+ port->tx_conf.tx_thresh = tx_thresh;
+ port->tx_conf.tx_rs_thresh = tx_rs_thresh;
+ port->tx_conf.tx_free_thresh = tx_free_thresh;
+ port->tx_conf.txq_flags = txq_flags;
+
+ rte_eth_macaddr_get(pid, &port->eth_addr);
+
+ map_port_queue_stats_mapping_registers(pid, port);
+#ifdef RTE_NIC_BYPASS
+ rte_eth_dev_bypass_init(pid);
+#endif
+ }
+}
+
+const uint16_t vlan_tags[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31
+};
+
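+/*
+ * Build the multi-queue RX/TX settings of eth_conf for DCB: either
+ * VMDQ+DCB, with the pools mapped from the vlan_tags[] array above, or
+ * plain DCB with 4 or 8 traffic classes, optionally with priority flow
+ * control.
+ */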
+static int
+get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
+{
+ uint8_t i;
+
+ /*
+ * Builds up the correct configuration for DCB+VT based on the vlan_tags array
+ * given above, and the number of traffic classes available for use.
+ */
+ if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
+ struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
+ struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
+
+ /* VMDQ+DCB RX and TX configurations */
+ vmdq_rx_conf.enable_default_pool = 0;
+ vmdq_rx_conf.default_pool = 0;
+ vmdq_rx_conf.nb_queue_pools =
+ (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+ vmdq_tx_conf.nb_queue_pools =
+ (dcb_conf->num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+
+ vmdq_rx_conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
+ for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
+ vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[i];
+ vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
}
- printf("done: ");
- rte_eth_link_get(pi, &link);
- if (link.link_status) {
- printf(" Link Up - speed %u Mbps - %s\n",
- (unsigned) link.link_speed,
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
- ("full-duplex") : ("half-duplex\n"));
- } else {
- printf(" Link Down\n");
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ vmdq_rx_conf.dcb_queue[i] = i;
+ vmdq_tx_conf.dcb_queue[i] = i;
+ }
+
+ /* Set DCB mode of RX and TX of multiple queues */
+ eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
+ eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+ if (dcb_conf->pfc_en)
+ eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
+ else
+ eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+
+ (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
+ sizeof(struct rte_eth_vmdq_dcb_conf)));
+ (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
+ sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
+ }
+ else {
+ struct rte_eth_dcb_rx_conf rx_conf;
+ struct rte_eth_dcb_tx_conf tx_conf;
+
+ /* queue mapping configuration of DCB RX and TX */
+ if (dcb_conf->num_tcs == ETH_4_TCS)
+ dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
+ else
+ dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
+
+ rx_conf.nb_tcs = dcb_conf->num_tcs;
+ tx_conf.nb_tcs = dcb_conf->num_tcs;
+
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ rx_conf.dcb_queue[i] = i;
+ tx_conf.dcb_queue[i] = i;
}
+ eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
+ eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
+ if (dcb_conf->pfc_en)
+ eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
+ else
+ eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
- /*
- * If enabled, put device in promiscuous mode.
- * This allows the PMD test in IO forwarding mode to forward
- * packets to itself through 2 cross-connected ports of the
- * target machine.
- */
- if (promiscuous_on)
- rte_eth_promiscuous_enable(pi);
+ (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
+ sizeof(struct rte_eth_dcb_rx_conf)));
+ (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
+ sizeof(struct rte_eth_dcb_tx_conf)));
}
+
+ return 0;
+}
+
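+/*
+ * Switch a port to DCB mode: force 128 RX/TX queues, build the DCB
+ * device configuration, enable HW VLAN filtering for every entry of
+ * vlan_tags[], and flag the port as DCB-configured.
+ */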
+int
+init_port_dcb_config(portid_t pid, struct dcb_config *dcb_conf)
+{
+ struct rte_eth_conf port_conf;
+ struct rte_port *rte_port;
+ int retval;
+ uint16_t nb_vlan;
+ uint16_t i;
+
+ /* rxq and txq configuration in dcb mode */
+ nb_rxq = 128;
+ nb_txq = 128;
+ rx_free_thresh = 64;
+
+ memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+ /* Enter DCB configuration status */
+ dcb_config = 1;
+
+ nb_vlan = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
+ /* Set configuration of DCB in VT mode and DCB in non-VT mode */
+ retval = get_eth_dcb_conf(&port_conf, dcb_conf);
+ if (retval < 0)
+ return retval;
+
+ rte_port = &ports[pid];
+ memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
+
+ rte_port->rx_conf.rx_thresh = rx_thresh;
+ rte_port->rx_conf.rx_free_thresh = rx_free_thresh;
+ rte_port->tx_conf.tx_thresh = tx_thresh;
+ rte_port->tx_conf.tx_rs_thresh = tx_rs_thresh;
+ rte_port->tx_conf.tx_free_thresh = tx_free_thresh;
+ /* VLAN filter */
+ rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
+ for (i = 0; i < nb_vlan; i++) {
+ rx_vft_set(pid, vlan_tags[i], 1);
+ }
+
+ rte_eth_macaddr_get(pid, &rte_port->eth_addr);
+ map_port_queue_stats_mapping_registers(pid, rte_port);
+
+ rte_port->dcb_flag = 1;
+
+ return 0;
}
#ifdef RTE_EXEC_ENV_BAREMETAL
main(int argc, char** argv)
{
int diag;
+ uint8_t port_id;
diag = rte_eal_init(argc, argv);
if (diag < 0)
rte_panic("Cannot init EAL\n");
- if (rte_pmd_init_all())
- rte_panic("Cannot init PMD\n");
-
- if (rte_eal_pci_probe())
- rte_panic("Cannot probe PCI\n");
-
nb_ports = (portid_t) rte_eth_dev_count();
if (nb_ports == 0)
- rte_exit(EXIT_FAILURE, "No probed ethernet devices - check that "
- "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
- "CONFIG_RTE_LIBRTE_EM_PMD=y and that "
- "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
- "configuration file\n");
+ rte_exit(EXIT_FAILURE, "No probed ethernet device\n");
set_def_fwd_config();
if (nb_lcores == 0)
nb_rxq, nb_txq);
init_config();
-
- init_ports();
-
- if (interactive == 1)
+ if (start_port(RTE_PORT_ALL) != 0)
+ rte_exit(EXIT_FAILURE, "Start ports failed\n");
+
+ /* set all ports to promiscuous mode by default */
+ for (port_id = 0; port_id < nb_ports; port_id++)
+ rte_eth_promiscuous_enable(port_id);
+
+#ifdef RTE_LIBRTE_CMDLINE
+ if (interactive == 1) {
+ if (auto_start) {
+ printf("Start automatic packet forwarding\n");
+ start_packet_forwarding(0);
+ }
prompt();
- else {
+ } else
+#endif
+ {
char c;
int rc;