#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
+#ifdef RTE_LIB_METRICS
#include <rte_metrics.h>
+#endif
#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
#ifdef RTE_EXEC_ENV_WINDOWS
#include <process.h>
#endif
+#ifdef RTE_NET_BOND
+#include <rte_eth_bond.h>
+#endif
#include "testpmd.h"
#endif
#define EXTMEM_HEAP_NAME "extmem"
-#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
+/*
+ * Zone size with the malloc overhead (max of debug and release variants)
+ * must fit into the smallest supported hugepage size (2M),
+ * so that an IOVA-contiguous zone of this size can always be allocated
+ * if there are free 2M hugepages.
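+ * For example, with a 64-byte cache line this works out to
+ * 2 MiB - 4 * 64 B = 2096896 bytes.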
+ */
+#define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */
/*
* Receive Side Scaling (RSS) configuration.
*/
-uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
+uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
/*
* Port topology configuration
uint8_t latencystats_enabled;
/*
- * Lcore ID to serive latency statistics.
+ * Lcore ID to service latency statistics.
*/
lcoreid_t latencystats_lcore_id = -1;
struct rte_eth_rxmode rx_mode;
struct rte_eth_txmode tx_mode = {
- .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+ .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};
-struct rte_fdir_conf fdir_conf = {
+struct rte_eth_fdir_conf fdir_conf = {
.mode = RTE_FDIR_MODE_NONE,
- .pballoc = RTE_FDIR_PBALLOC_64K,
+ .pballoc = RTE_ETH_FDIR_PBALLOC_64K,
.status = RTE_FDIR_REPORT_STATUS,
.mask = {
.vlan_tci_mask = 0xFFEF,
uint8_t bitrate_enabled;
#endif
+#ifdef RTE_LIB_GRO
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
+#endif
/*
- * hexadecimal bitmask of RX mq mode can be enabled.
+ * Hexadecimal bitmask of RX multi-queue modes that can be enabled.
*/
-enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
+enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
/*
* Used to set forced link speed
}
}
-static void
-flow_pick_transfer_proxy_mp(uint16_t port_id)
-{
- struct rte_port *port = &ports[port_id];
- int ret;
-
- port->flow_transfer_proxy = port_id;
-
- if (!is_proc_primary())
- return;
-
- ret = rte_flow_pick_transfer_proxy(port_id, &port->flow_transfer_proxy,
- NULL);
- if (ret != 0) {
- fprintf(stderr, "Error picking flow transfer proxy for port %u: %s - ignore\n",
- port_id, rte_strerror(-ret));
- }
-}
-
static int
eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
return 0;
}
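+/*
+ * Update testpmd's recorded status of every slave of a bonded device.
+ * The bonding driver starts/stops slaves implicitly together with the
+ * bonded port, so the shadow port_status kept in ports[] must be
+ * synchronized here.
+ */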
+static int
+change_bonding_slave_port_status(portid_t bond_pid, bool is_stop)
+{
+#ifdef RTE_NET_BOND
+
+ portid_t slave_pids[RTE_MAX_ETHPORTS];
+ struct rte_port *port;
+ int num_slaves;
+ portid_t slave_pid;
+ int i;
+
+ num_slaves = rte_eth_bond_slaves_get(bond_pid, slave_pids,
+ RTE_MAX_ETHPORTS);
+ if (num_slaves < 0) {
+		fprintf(stderr, "Failed to get slave list for port %u\n",
+ bond_pid);
+ return num_slaves;
+ }
+
+ for (i = 0; i < num_slaves; i++) {
+ slave_pid = slave_pids[i];
+ port = &ports[slave_pid];
+ port->port_status =
+ is_stop ? RTE_PORT_STOPPED : RTE_PORT_STARTED;
+ }
+#else
+ RTE_SET_USED(bond_pid);
+ RTE_SET_USED(is_stop);
+#endif
+ return 0;
+}
+
static int
eth_dev_start_mp(uint16_t port_id)
{
- if (is_proc_primary())
- return rte_eth_dev_start(port_id);
+ int ret;
+
+ if (is_proc_primary()) {
+ ret = rte_eth_dev_start(port_id);
+ if (ret != 0)
+ return ret;
+
+ struct rte_port *port = &ports[port_id];
+
+ /*
+ * Starting a bonded port also starts all slaves under the bonded
+	 * device. So if this port is a bonded device, we need to
+	 * modify the port status of these slaves.
+ */
+ if (port->bond_flag == 1)
+ return change_bonding_slave_port_status(port_id, false);
+ }
return 0;
}
static int
eth_dev_stop_mp(uint16_t port_id)
{
- if (is_proc_primary())
- return rte_eth_dev_stop(port_id);
+ int ret;
+
+ if (is_proc_primary()) {
+ ret = rte_eth_dev_stop(port_id);
+ if (ret != 0)
+ return ret;
+
+ struct rte_port *port = &ports[port_id];
+
+ /*
+ * Stopping a bonded port also stops all slaves under the bonded
+	 * device. So if this port is a bonded device, we need to
+	 * modify the port status of these slaves.
+ */
+ if (port->bond_flag == 1)
+ return change_bonding_slave_port_status(port_id, true);
+ }
return 0;
}
*/
static int all_ports_started(void);
+#ifdef RTE_LIB_GSO
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
+#endif
/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
return 0;
fail:
- if (iovas)
- free(iovas);
+ free(iovas);
if (addr)
munmap(addr, mem_sz);
ext_num = 0;
break;
}
- mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
- socket_id,
- RTE_MEMZONE_IOVA_CONTIG |
- RTE_MEMZONE_1GB |
- RTE_MEMZONE_SIZE_HINT_ONLY,
- EXTBUF_ZONE_SIZE);
+ mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE,
+ socket_id,
+ RTE_MEMZONE_IOVA_CONTIG |
+ RTE_MEMZONE_1GB |
+ RTE_MEMZONE_SIZE_HINT_ONLY);
if (mz == NULL) {
/*
* The caller exits on external buffer creation
int i;
eth_rx_metadata_negotiate_mp(pid);
- flow_pick_transfer_proxy_mp(pid);
port->dev_conf.txmode = tx_mode;
port->dev_conf.rxmode = rx_mode;
if (ret != 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
- if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
- ~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Apply Rx offloads configuration */
for (i = 0; i < port->dev_info.max_rx_queues; i++)
- port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
+ port->rxq[i].conf.offloads = port->dev_conf.rxmode.offloads;
/* Apply Tx offloads configuration */
for (i = 0; i < port->dev_info.max_tx_queues; i++)
- port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
+ port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
if (eth_link_speed)
port->dev_conf.link_speeds = eth_link_speed;
struct rte_mempool *mbp;
unsigned int nb_mbuf_per_pool;
lcoreid_t lc_id;
+#ifdef RTE_LIB_GRO
struct rte_gro_param gro_param;
+#endif
+#ifdef RTE_LIB_GSO
uint32_t gso_types;
+#endif
/* Configuration of logical cores. */
fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
init_port_config();
- gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
+#ifdef RTE_LIB_GSO
+ gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
+#endif
/*
* Records which Mbuf pool to use by each logical core, if needed.
*/
if (mbp == NULL)
mbp = mbuf_pool_find(0, 0);
fwd_lcores[lc_id]->mbp = mbp;
+#ifdef RTE_LIB_GSO
/* initialize GSO context */
fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
RTE_ETHER_CRC_LEN;
fwd_lcores[lc_id]->gso_ctx.flag = 0;
+#endif
}
fwd_config_setup();
+#ifdef RTE_LIB_GRO
/* create a gro context for each lcore */
gro_param.gro_types = RTE_GRO_TCP_IPV4;
gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
"rte_gro_ctx_create() failed\n");
}
}
+#endif
}
init_port_config();
}
-
int
init_fwd_streams(void)
{
pktnb_stats[0] = 0;
/* Find the next 2 burst sizes with highest occurrences. */
- for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
+ for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) {
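+		/* index MAX_PKT_BURST (a full burst) is a valid slot too */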
nb_burst = pbs->pkt_burst_spread[nb_pkt];
if (nb_burst == 0)
struct rte_port *port;
streamid_t sm_id;
portid_t pt_id;
+ int ret;
int i;
memset(ports_stats, 0, sizeof(ports_stats));
pt_id = fwd_ports_ids[i];
port = &ports[pt_id];
- rte_eth_stats_get(pt_id, &stats);
+ ret = rte_eth_stats_get(pt_id, &stats);
+ if (ret != 0) {
+ fprintf(stderr,
+				"%s: Error: failed to get stats (port %u): %d\n",
+ __func__, pt_id, ret);
+ continue;
+ }
stats.ipackets -= port->stats.ipackets;
stats.opackets -= port->stats.opackets;
stats.ibytes -= port->stats.ibytes;
{
streamid_t sm_id;
portid_t pt_id;
+ int ret;
int i;
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
- rte_eth_stats_get(pt_id, &ports[pt_id].stats);
+ ret = rte_eth_stats_get(pt_id, &ports[pt_id].stats);
+ if (ret != 0)
+ fprintf(stderr,
+				"%s: Error: failed to clear stats (port %u): %d\n",
+ __func__, pt_id, ret);
}
for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
struct fwd_stream *fs = fwd_streams[sm_id];
for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
for (rxq = 0; rxq < nb_rxq; rxq++) {
port_id = fwd_ports_ids[rxp];
+
+ /* Polling stopped queues is prohibited. */
+ if (ports[port_id].rxq[rxq].state ==
+ RTE_ETH_QUEUE_STATE_STOPPED)
+ continue;
+
/**
-		 * testpmd can stuck in the below do while loop
+		 * testpmd can get stuck in the do-while loop below
* if rte_eth_rx_burst() always returns nonzero
nb_fs = fc->stream_nb;
do {
for (sm_id = 0; sm_id < nb_fs; sm_id++)
- (*pkt_fwd)(fsm[sm_id]);
+ if (!fsm[sm_id]->disabled)
+ (*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIB_BITRATESTATS
if (bitrate_enabled != 0 &&
bitrate_lcore_id == rte_lcore_id()) {
{
port_fwd_begin_t port_fwd_begin;
port_fwd_end_t port_fwd_end;
+ stream_init_t stream_init = cur_fwd_eng->stream_init;
unsigned int i;
if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
if (!pkt_fwd_shared_rxq_check())
return;
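+	/*
+	 * Give the forwarding engine a chance to set up per-stream state,
+	 * e.g. to disable streams whose queues are stopped.
+	 */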
+ if (stream_init != NULL)
+ for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
+ stream_init(fwd_streams[i]);
+
port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
if (port_fwd_begin != NULL) {
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
continue;
/* Fail to setup rx queue, return */
- if (rte_atomic16_cmpset(&(port->port_status),
- RTE_PORT_HANDLING,
- RTE_PORT_STOPPED) == 0)
+ if (port->port_status == RTE_PORT_HANDLING)
+ port->port_status = RTE_PORT_STOPPED;
+ else
fprintf(stderr,
"Port %d can not be set back to stopped\n", pi);
fprintf(stderr, "Fail to configure port %d hairpin queues\n",
continue;
/* Fail to setup rx queue, return */
- if (rte_atomic16_cmpset(&(port->port_status),
- RTE_PORT_HANDLING,
- RTE_PORT_STOPPED) == 0)
+ if (port->port_status == RTE_PORT_HANDLING)
+ port->port_status = RTE_PORT_STOPPED;
+ else
fprintf(stderr,
"Port %d can not be set back to stopped\n", pi);
fprintf(stderr, "Fail to configure port %d hairpin queues\n",
ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
nb_rx_desc, socket_id,
rx_conf, mp);
- return ret;
+ goto exit;
}
for (i = 0; i < rx_pkt_nb_segs; i++) {
struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
socket_id, rx_conf, NULL);
rx_conf->rx_seg = NULL;
rx_conf->rx_nseg = 0;
+exit:
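+	/* record the queue state: deferred-start queues stay stopped */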
+ ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
+ RTE_ETH_QUEUE_STATE_STOPPED :
+ RTE_ETH_QUEUE_STATE_STARTED;
return ret;
}
if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
continue;
+ if (port_is_bonding_slave(pi)) {
+ fprintf(stderr,
+				"Please remove port %d from the bonded device.\n",
+ pi);
+ continue;
+ }
+
need_check_link_status = 0;
port = &ports[pi];
- if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
- RTE_PORT_HANDLING) == 0) {
+ if (port->port_status == RTE_PORT_STOPPED)
+ port->port_status = RTE_PORT_HANDLING;
+ else {
fprintf(stderr, "Port %d is now not stopped\n", pi);
continue;
}
nb_txq + nb_hairpinq,
&(port->dev_conf));
if (diag != 0) {
- if (rte_atomic16_cmpset(&(port->port_status),
- RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
+ if (port->port_status == RTE_PORT_HANDLING)
+ port->port_status = RTE_PORT_STOPPED;
+ else
fprintf(stderr,
"Port %d can not be set back to stopped\n",
pi);
for (k = 0;
k < port->dev_info.max_rx_queues;
k++)
- port->rx_conf[k].offloads |=
+ port->rxq[k].conf.offloads |=
dev_conf.rxmode.offloads;
}
/* Apply Tx offloads configuration */
for (k = 0;
k < port->dev_info.max_tx_queues;
k++)
- port->tx_conf[k].offloads |=
+ port->txq[k].conf.offloads |=
dev_conf.txmode.offloads;
}
}
port->need_reconfig_queues = 0;
/* setup tx queues */
for (qi = 0; qi < nb_txq; qi++) {
+ struct rte_eth_txconf *conf =
+ &port->txq[qi].conf;
+
if ((numa_support) &&
(txring_numa[pi] != NUMA_NO_CONFIG))
diag = rte_eth_tx_queue_setup(pi, qi,
port->nb_tx_desc[qi],
txring_numa[pi],
- &(port->tx_conf[qi]));
+ &(port->txq[qi].conf));
else
diag = rte_eth_tx_queue_setup(pi, qi,
port->nb_tx_desc[qi],
port->socket_id,
- &(port->tx_conf[qi]));
+ &(port->txq[qi].conf));
- if (diag == 0)
+ if (diag == 0) {
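+				/* mirror deferred-start into queue state */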
+ port->txq[qi].state =
+ conf->tx_deferred_start ?
+ RTE_ETH_QUEUE_STATE_STOPPED :
+ RTE_ETH_QUEUE_STATE_STARTED;
continue;
+ }
/* Fail to setup tx queue, return */
- if (rte_atomic16_cmpset(&(port->port_status),
- RTE_PORT_HANDLING,
- RTE_PORT_STOPPED) == 0)
+ if (port->port_status == RTE_PORT_HANDLING)
+ port->port_status = RTE_PORT_STOPPED;
+ else
fprintf(stderr,
"Port %d can not be set back to stopped\n",
pi);
diag = rx_queue_setup(pi, qi,
port->nb_rx_desc[qi],
rxring_numa[pi],
- &(port->rx_conf[qi]),
+ &(port->rxq[qi].conf),
mp);
} else {
struct rte_mempool *mp =
diag = rx_queue_setup(pi, qi,
port->nb_rx_desc[qi],
port->socket_id,
- &(port->rx_conf[qi]),
+ &(port->rxq[qi].conf),
mp);
}
if (diag == 0)
continue;
/* Fail to setup rx queue, return */
- if (rte_atomic16_cmpset(&(port->port_status),
- RTE_PORT_HANDLING,
- RTE_PORT_STOPPED) == 0)
+ if (port->port_status == RTE_PORT_HANDLING)
+ port->port_status = RTE_PORT_STOPPED;
+ else
fprintf(stderr,
"Port %d can not be set back to stopped\n",
pi);
pi, rte_strerror(-diag));
/* Fail to setup rx queue, return */
- if (rte_atomic16_cmpset(&(port->port_status),
- RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
+ if (port->port_status == RTE_PORT_HANDLING)
+ port->port_status = RTE_PORT_STOPPED;
+ else
fprintf(stderr,
"Port %d can not be set back to stopped\n",
pi);
continue;
}
- if (rte_atomic16_cmpset(&(port->port_status),
- RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
+ if (port->port_status == RTE_PORT_HANDLING)
+ port->port_status = RTE_PORT_STARTED;
+ else
fprintf(stderr, "Port %d can not be set into started\n",
pi);
}
port = &ports[pi];
- if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
- RTE_PORT_HANDLING) == 0)
+ if (port->port_status == RTE_PORT_STARTED)
+ port->port_status = RTE_PORT_HANDLING;
+ else
continue;
if (hairpin_mode & 0xf) {
RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
pi);
- if (rte_atomic16_cmpset(&(port->port_status),
- RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
+ if (port->port_status == RTE_PORT_HANDLING)
+ port->port_status = RTE_PORT_STOPPED;
+ else
fprintf(stderr, "Port %d can not be set into stopped\n",
pi);
need_check_link_status = 1;
}
port = &ports[pi];
- if (rte_atomic16_cmpset(&(port->port_status),
- RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
+ if (port->port_status == RTE_PORT_CLOSED) {
fprintf(stderr, "Port %d is already closed\n", pi);
continue;
}
if (is_proc_primary()) {
port_flow_flush(pi);
port_flex_item_flush(pi);
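+		/* destroy indirect flow actions created via this port */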
+ port_action_handle_flush(pi);
rte_eth_dev_close(pi);
}
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
struct rte_port *port = &ports[pid];
for (qid = 0; qid < nb_rxq; qid++) {
- offloads = port->rx_conf[qid].offloads;
- port->rx_conf[qid] = port->dev_info.default_rxconf;
+ offloads = port->rxq[qid].conf.offloads;
+ port->rxq[qid].conf = port->dev_info.default_rxconf;
if (rxq_share > 0 &&
(port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
/* Non-zero share group to enable RxQ share. */
- port->rx_conf[qid].share_group = pid / rxq_share + 1;
- port->rx_conf[qid].share_qid = qid; /* Equal mapping. */
+ port->rxq[qid].conf.share_group = pid / rxq_share + 1;
+ port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
}
if (offloads != 0)
- port->rx_conf[qid].offloads = offloads;
+ port->rxq[qid].conf.offloads = offloads;
/* Check if any Rx parameters have been passed */
if (rx_pthresh != RTE_PMD_PARAM_UNSET)
- port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
+ port->rxq[qid].conf.rx_thresh.pthresh = rx_pthresh;
if (rx_hthresh != RTE_PMD_PARAM_UNSET)
- port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
+ port->rxq[qid].conf.rx_thresh.hthresh = rx_hthresh;
if (rx_wthresh != RTE_PMD_PARAM_UNSET)
- port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
+ port->rxq[qid].conf.rx_thresh.wthresh = rx_wthresh;
if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
- port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
+ port->rxq[qid].conf.rx_free_thresh = rx_free_thresh;
if (rx_drop_en != RTE_PMD_PARAM_UNSET)
- port->rx_conf[qid].rx_drop_en = rx_drop_en;
+ port->rxq[qid].conf.rx_drop_en = rx_drop_en;
port->nb_rx_desc[qid] = nb_rxd;
}
for (qid = 0; qid < nb_txq; qid++) {
- offloads = port->tx_conf[qid].offloads;
- port->tx_conf[qid] = port->dev_info.default_txconf;
+ offloads = port->txq[qid].conf.offloads;
+ port->txq[qid].conf = port->dev_info.default_txconf;
if (offloads != 0)
- port->tx_conf[qid].offloads = offloads;
+ port->txq[qid].conf.offloads = offloads;
/* Check if any Tx parameters have been passed */
if (tx_pthresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
+ port->txq[qid].conf.tx_thresh.pthresh = tx_pthresh;
if (tx_hthresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
+ port->txq[qid].conf.tx_thresh.hthresh = tx_hthresh;
if (tx_wthresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
+ port->txq[qid].conf.tx_thresh.wthresh = tx_wthresh;
if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
+ port->txq[qid].conf.tx_rs_thresh = tx_rs_thresh;
if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
+ port->txq[qid].conf.tx_free_thresh = tx_free_thresh;
port->nb_tx_desc[qid] = nb_txd;
}
if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
port->dev_conf.rxmode.mq_mode =
(enum rte_eth_rx_mq_mode)
- (rx_mq_mode & ETH_MQ_RX_RSS);
+ (rx_mq_mode & RTE_ETH_MQ_RX_RSS);
} else {
- port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+ port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
port->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_RSS_HASH;
+ ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
for (i = 0;
i < port->dev_info.nb_rx_queues;
i++)
- port->rx_conf[i].offloads &=
- ~DEV_RX_OFFLOAD_RSS_HASH;
+ port->rxq[i].conf.offloads &=
+ ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
}
}
vmdq_rx_conf->enable_default_pool = 0;
vmdq_rx_conf->default_pool = 0;
vmdq_rx_conf->nb_queue_pools =
- (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+ (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
vmdq_tx_conf->nb_queue_pools =
- (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+ (num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
vmdq_rx_conf->pool_map[i].pools =
1 << (i % vmdq_rx_conf->nb_queue_pools);
}
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
}
/* set DCB mode of RX and TX of multiple queues */
eth_conf->rxmode.mq_mode =
(enum rte_eth_rx_mq_mode)
- (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
- eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+ (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
+ eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
} else {
struct rte_eth_dcb_rx_conf *rx_conf =
ð_conf->rx_adv_conf.dcb_rx_conf;
rx_conf->nb_tcs = num_tcs;
tx_conf->nb_tcs = num_tcs;
- for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
rx_conf->dcb_tc[i] = i % num_tcs;
tx_conf->dcb_tc[i] = i % num_tcs;
}
eth_conf->rxmode.mq_mode =
(enum rte_eth_rx_mq_mode)
- (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
+ (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
eth_conf->rx_adv_conf.rss_conf = rss_conf;
- eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
+ eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
}
if (pfc_en)
eth_conf->dcb_capability_en =
- ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+ RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
else
- eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+ eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
return 0;
}
retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
if (retval < 0)
return retval;
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+	/* remove RSS HASH offload for DCB in VT mode */
+ if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
+ port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ for (i = 0; i < nb_rxq; i++)
+ rte_port->rxq[i].conf.offloads &=
+ ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ }
-	/* re-configure the device . */
+	/* re-configure the device. */
retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
rxtx_port_config(pid);
/* VLAN filter */
- rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
for (i = 0; i < RTE_DIM(vlan_tags); i++)
rx_vft_set(pid, vlan_tags[i], 1);
port_id, rte_strerror(-ret));
}
+#ifdef RTE_LIB_METRICS
/* Init metrics library */
rte_metrics_init(rte_socket_id());
+#endif
#ifdef RTE_LIB_LATENCYSTATS
if (latencystats_enabled != 0) {
}
#endif
#ifdef RTE_LIB_CMDLINE
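+	/* build the command list before any commands are read or parsed */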
+ if (init_cmdline() != 0)
+ rte_exit(EXIT_FAILURE,
+ "Could not initialise cmdline context.\n");
+
if (strlen(cmdline_filename) != 0)
cmdline_read_from_file(cmdline_filename);