/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
* Copyright(c) 2016-2019 Solarflare Communications Inc.
*
* This software was jointly developed between OKTET Labs (under contract
#include "sfc.h"
#include "sfc_debug.h"
+#include "sfc_flow_tunnel.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
+#include "sfc_mae_counter.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"
rxq_info->state &= ~SFC_RXQ_FLUSHING;
}
+/* Return the running count of pushed Rx descriptors; it is not bounded by the ring size */
+unsigned int
+sfc_rx_get_pushed(struct sfc_adapter *sa, struct sfc_dp_rxq *dp_rxq)
+{
+ SFC_ASSERT(sa->priv.dp_rx->get_pushed != NULL);
+
+ return sa->priv.dp_rx->get_pushed(dp_rxq);
+}
+
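/*
 * Illustrative sketch, not part of the patch: since get_pushed() is a
 * free-running counter, a caller can derive the number of descriptors
 * still owned by the NIC via wrap-safe unsigned subtraction against its
 * own completion counter. The helper below is hypothetical.
 */
static inline unsigned int
sfc_rx_example_in_flight(struct sfc_adapter *sa, struct sfc_dp_rxq *dp_rxq,
			 unsigned int completed)
{
	/* Unsigned arithmetic keeps this correct across counter wrap */
	return sfc_rx_get_pushed(sa, dp_rxq) - completed;
}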
static int
sfc_efx_rx_qprime(struct sfc_efx_rxq *rxq)
{
++i, id = (id + 1) & rxq->ptr_mask) {
m = objs[i];
- MBUF_RAW_ALLOC_CHECK(m);
+ __rte_mbuf_raw_sanity_check(m);
rxd = &rxq->sw_desc[id];
rxd->mbuf = m;
SFC_ASSERT(added != rxq->added);
rxq->added = added;
efx_rx_qpush(rxq->common, added, &rxq->pushed);
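+	/* Account one more Rx doorbell ring on this queue */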
+ rxq->dp.dpq.dbells++;
}
static uint64_t
switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
- mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case EFX_PKT_IPV4:
- mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
break;
default:
- RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
- SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
- PKT_RX_IP_CKSUM_UNKNOWN);
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN != 0);
+ SFC_ASSERT((mbuf_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
+ RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN);
break;
}
(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
- mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+ mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
break;
case EFX_PKT_TCP:
case EFX_PKT_UDP:
- mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
break;
default:
- RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
- SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
- PKT_RX_L4_CKSUM_UNKNOWN);
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN != 0);
+ SFC_ASSERT((mbuf_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
+ RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN);
break;
}
EFX_RX_HASHALG_TOEPLITZ,
mbuf_data);
- m->ol_flags |= PKT_RX_RSS_HASH;
+ m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
}
boolean_t
sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size, uint32_t rx_prefix_size,
- boolean_t rx_scatter_enabled, const char **error)
+ boolean_t rx_scatter_enabled, uint32_t rx_scatter_max,
+ const char **error)
{
- if ((rx_buf_size < pdu + rx_prefix_size) && !rx_scatter_enabled) {
- *error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
+ uint32_t effective_rx_scatter_max;
+ uint32_t rx_scatter_bufs;
+
+ effective_rx_scatter_max = rx_scatter_enabled ? rx_scatter_max : 1;
+ rx_scatter_bufs = EFX_DIV_ROUND_UP(pdu + rx_prefix_size, rx_buf_size);
+
+ if (rx_scatter_bufs > effective_rx_scatter_max) {
+ if (rx_scatter_enabled)
+			*error = "Possible number of Rx scatter buffers exceeds the supported maximum";
+ else
+ *error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
return B_FALSE;
}
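/*
 * Worked example with assumed values (not from the patch): for
 * pdu = 9000, rx_prefix_size = 22 and rx_buf_size = 2048,
 * rx_scatter_bufs = EFX_DIV_ROUND_UP(9022, 2048) = 5, so the check
 * above passes only if Rx scatter is enabled and rx_scatter_max >= 5.
 */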
struct sfc_efx_rxq *rxq;
int rc;
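+	/*
+	 * The libefx-based datapath does not support NICs that require
+	 * per-region DMA address translation.
+	 */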
+ rc = ENOTSUP;
+ if (info->nic_dma_info->nb_regions > 0)
+ goto fail_nic_dma;
+
rc = ENOMEM;
rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
RTE_CACHE_LINE_SIZE, socket_id);
rte_free(rxq);
fail_rxq_alloc:
+fail_nic_dma:
return rc;
}
static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
- __rte_unused unsigned int evq_read_ptr)
+ __rte_unused unsigned int evq_read_ptr,
+ const efx_rx_prefix_layout_t *pinfo)
{
/* libefx-based datapath is specific to libefx-based PMD */
struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
int rc;
+	/*
+	 * The libefx API is used to extract information from the Rx prefix
+	 * and it guarantees consistency. Just do a length check to ensure
+	 * that space for the Rx prefix was reserved in the Rx buffers
+	 * correctly.
+	 */
+ if (rxq->prefix_size != pinfo->erpl_length)
+ return ENOTSUP;
+
rxq->common = crxq->common;
rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
.dp = {
.name = SFC_KVARG_DATAPATH_EFX,
.type = SFC_DP_RX,
- .hw_fw_caps = 0,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_RX_EFX,
},
.features = SFC_DP_RX_FEAT_INTR,
- .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_RSS_HASH,
- .queue_offload_capa = DEV_RX_OFFLOAD_SCATTER,
+ .dev_offload_capa = RTE_ETH_RX_OFFLOAD_CHECKSUM |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH,
+ .queue_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER,
.qsize_up_rings = sfc_efx_rx_qsize_up_rings,
.qcreate = sfc_efx_rx_qcreate,
.qdestroy = sfc_efx_rx_qdestroy,
};
static void
-sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qflush(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
unsigned int retry_count;
unsigned int wait_count;
int rc;
+ ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);
(wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
if (rxq_info->state & SFC_RXQ_FLUSHING)
- sfc_err(sa, "RxQ %u flush timed out", sw_index);
+ sfc_err(sa, "RxQ %d (internal %u) flush timed out",
+ ethdev_qid, sw_index);
if (rxq_info->state & SFC_RXQ_FLUSH_FAILED)
- sfc_err(sa, "RxQ %u flush failed", sw_index);
+ sfc_err(sa, "RxQ %d (internal %u) flush failed",
+ ethdev_qid, sw_index);
if (rxq_info->state & SFC_RXQ_FLUSHED)
- sfc_notice(sa, "RxQ %u flushed", sw_index);
+ sfc_notice(sa, "RxQ %d (internal %u) flushed",
+ ethdev_qid, sw_index);
}
sa->priv.dp_rx->qpurge(rxq_info->dp);
}
int
-sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
struct sfc_evq *evq;
+ efx_rx_prefix_layout_t pinfo;
int rc;
- sfc_log_init(sa, "sw_index=%u", sw_index);
-
SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
+ ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+ sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);
rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);
rxq = &sa->rxq_ctrl[sw_index];
evq = rxq->evq;
- rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
+ rc = sfc_ev_qstart(evq, sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index));
if (rc != 0)
goto fail_ev_qstart;
if (rc != 0)
goto fail_rx_qcreate;
+ rc = efx_rx_prefix_get_layout(rxq->common, &pinfo);
+ if (rc != 0)
+ goto fail_prefix_get_layout;
+
efx_rx_qenable(rxq->common);
- rc = sa->priv.dp_rx->qstart(rxq_info->dp, evq->read_ptr);
+ rc = sa->priv.dp_rx->qstart(rxq_info->dp, evq->read_ptr, &pinfo);
if (rc != 0)
goto fail_dp_qstart;
rxq_info->state |= SFC_RXQ_STARTED;
- if (sw_index == 0 && !sfc_sa2shared(sa)->isolated) {
+ if (ethdev_qid == 0 && !sfc_sa2shared(sa)->isolated) {
rc = sfc_rx_default_rxq_set_filter(sa, rxq);
if (rc != 0)
goto fail_mac_filter_default_rxq_set;
}
/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
- sa->eth_dev->data->rx_queue_state[sw_index] =
- RTE_ETH_QUEUE_STATE_STARTED;
+ if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+ sa->eth_dev->data->rx_queue_state[ethdev_qid] =
+ RTE_ETH_QUEUE_STATE_STARTED;
return 0;
fail_dp_qstart:
efx_rx_qdestroy(rxq->common);
+fail_prefix_get_layout:
fail_rx_qcreate:
fail_bad_contig_block_size:
fail_mp_get_info:
}
void
-sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
- sfc_log_init(sa, "sw_index=%u", sw_index);
-
SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
+ ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+ sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);
rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);
/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
- sa->eth_dev->data->rx_queue_state[sw_index] =
- RTE_ETH_QUEUE_STATE_STOPPED;
+ if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+ sa->eth_dev->data->rx_queue_state[ethdev_qid] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
rxq = &sa->rxq_ctrl[sw_index];
sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);
- if (sw_index == 0)
+ if (ethdev_qid == 0)
efx_mac_filter_default_rxq_clear(sa->nic);
sfc_rx_qflush(sa, sw_index);
uint64_t no_caps = 0;
if (encp->enc_tunnel_encapsulations_supported == 0)
- no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ no_caps |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
return ~no_caps;
}
{
uint64_t caps = sa->priv.dp_rx->dev_offload_capa;
- caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-
return caps & sfc_rx_get_offload_mask(sa);
}
/* Make sure that end padding does not write beyond the buffer */
if (buf_aligned < nic_align_end) {
/*
- * Estimate space which can be lost. If guarnteed buffer
+ * Estimate space which can be lost. If guaranteed buffer
* size is odd, lost space is (nic_align_end - 1). More
* accurate formula is below.
*/
}
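/*
 * Assumed numbers for illustration: with nic_align_end = 64 and an odd
 * guaranteed buffer size, end padding can consume up to
 * nic_align_end - 1 = 63 bytes, which is the worst case the estimate
 * above guards against.
 */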
int
-sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool)
{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
int rc;
SFC_ASSERT(rxq_entries <= sa->rxq_max_entries);
SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
- offloads = rx_conf->offloads |
- sa->eth_dev->data->dev_conf.rxmode.offloads;
+ ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+ offloads = rx_conf->offloads;
+ /* Add device level Rx offloads if the queue is an ethdev Rx queue */
+ if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+ offloads |= sa->eth_dev->data->dev_conf.rxmode.offloads;
+
rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
if (buf_size == 0) {
- sfc_err(sa, "RxQ %u mbuf pool object size is too small",
- sw_index);
+ sfc_err(sa,
+ "RxQ %d (internal %u) mbuf pool object size is too small",
+ ethdev_qid, sw_index);
rc = EINVAL;
goto fail_bad_conf;
}
if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
encp->enc_rx_prefix_size,
- (offloads & DEV_RX_OFFLOAD_SCATTER),
+ (offloads & RTE_ETH_RX_OFFLOAD_SCATTER),
+ encp->enc_rx_scatter_max,
&error)) {
- sfc_err(sa, "RxQ %u MTU check failed: %s", sw_index, error);
- sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
+ sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
+ ethdev_qid, sw_index, error);
+ sfc_err(sa,
+ "RxQ %d (internal %u) calculated Rx buffer size is %u vs "
"PDU size %u plus Rx prefix %u bytes",
- sw_index, buf_size, (unsigned int)sa->port.pdu,
- encp->enc_rx_prefix_size);
+ ethdev_qid, sw_index, buf_size,
+ (unsigned int)sa->port.pdu, encp->enc_rx_prefix_size);
rc = EINVAL;
goto fail_bad_conf;
}
else
rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
- rxq_info->type_flags =
- (offloads & DEV_RX_OFFLOAD_SCATTER) ?
+ rxq_info->type_flags |=
+ (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ?
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
if ((encp->enc_tunnel_encapsulations_supported != 0) &&
(sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
+ rxq_info->type_flags |= EFX_RXQ_FLAG_RSS_HASH;
+
+ if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0)
+ rxq_info->type_flags |= EFX_RXQ_FLAG_USER_FLAG;
+
+ if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
+ sfc_flow_tunnel_is_active(sa))
+ rxq_info->type_flags |= EFX_RXQ_FLAG_USER_MARK;
+
rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
evq_entries, socket_id, &evq);
if (rc != 0)
rxq_info->refill_mb_pool = mb_pool;
if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 &&
- (offloads & DEV_RX_OFFLOAD_RSS_HASH))
+ (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH;
else
rxq_info->rxq_flags = 0;
rxq->buf_size = buf_size;
- rc = sfc_dma_alloc(sa, "rxq", sw_index,
+ rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_NIC_DMA_ADDR_RX_RING,
efx_rxq_size(sa->nic, rxq_info->entries),
socket_id, &rxq->mem);
if (rc != 0)
info.buf_size = buf_size;
info.batch_max = encp->enc_rx_batch_max;
info.prefix_size = encp->enc_rx_prefix_size;
+
+ if (sfc_flow_tunnel_is_active(sa))
+ info.user_mark_mask = SFC_FT_USER_MARK_MASK;
+ else
+ info.user_mark_mask = UINT32_MAX;
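+	/*
+	 * Illustrative note (assumption, not taken from the patch):
+	 * datapaths are expected to apply this mask before exposing the
+	 * mark, e.g. m->hash.fdir.hi = mark & user_mark_mask, so that
+	 * bits reserved for tunnel offload stay hidden from applications.
+	 */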
+
info.flags = rxq_info->rxq_flags;
info.rxq_entries = rxq_info->entries;
info.rxq_hw_ring = rxq->mem.esm_base;
- info.evq_hw_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);
+ info.evq_hw_index = sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index);
info.evq_entries = evq_entries;
info.evq_hw_ring = evq->mem.esm_base;
info.hw_index = rxq->hw_index;
info.mem_bar = sa->mem_bar.esb_base;
info.vi_window_shift = encp->enc_vi_window_shift;
+ info.fcw_offset = sa->fcw_offset;
+
+ info.nic_dma_info = &sas->nic_dma_info;
rc = sa->priv.dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
}
void
-sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_ethdev_qid_t ethdev_qid;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
- sa->eth_dev->data->rx_queues[sw_index] = NULL;
+ ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+ if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+ sa->eth_dev->data->rx_queues[ethdev_qid] = NULL;
rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
* Mapping between RTE RSS hash functions and their EFX counterparts.
*/
static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
- { ETH_RSS_NONFRAG_IPV4_TCP,
+ { RTE_ETH_RSS_NONFRAG_IPV4_TCP,
EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
- { ETH_RSS_NONFRAG_IPV4_UDP,
+ { RTE_ETH_RSS_NONFRAG_IPV4_UDP,
EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
- { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+ { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX,
EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
- { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+ { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX,
EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
- { ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+ { RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
EFX_RX_HASH(IPV4, 2TUPLE) },
- { ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
- ETH_RSS_IPV6_EX,
+ { RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+ RTE_ETH_RSS_IPV6_EX,
EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
EFX_RX_HASH(IPV6, 2TUPLE) }
};
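/*
 * Sketch of how the map above could be consumed (the function and field
 * names are assumptions, not the upstream helper): OR together the EFX
 * hash types of every entry whose RTE flags intersect the request.
 */
static efx_rx_hash_type_t
example_rte_to_efx_hash_types(uint64_t rte_hash_types)
{
	efx_rx_hash_type_t efx_hash_types = 0;
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
		if ((rte_hash_types & sfc_rss_hf_map[i].rte) != 0)
			efx_hash_types |= sfc_rss_hf_map[i].efx;
	}

	return efx_hash_types;
}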
return rc;
}
+struct sfc_rxq_info *
+sfc_rxq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
+ sfc_ethdev_qid_t ethdev_qid)
+{
+ sfc_sw_index_t sw_index;
+
+ SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
+ SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
+
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
+ return &sas->rxq_info[sw_index];
+}
+
+struct sfc_rxq *
+sfc_rxq_ctrl_by_ethdev_qid(struct sfc_adapter *sa, sfc_ethdev_qid_t ethdev_qid)
+{
+ struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+ sfc_sw_index_t sw_index;
+
+ SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
+ SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
+
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
+ return &sa->rxq_ctrl[sw_index];
+}
+
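/*
 * A minimal usage sketch for the helpers above, assuming (as the queue
 * counters in sfc_rx_configure() below imply) that ethdev queues occupy
 * the first sw_index slots and reserved queues follow them:
 *
 *	struct sfc_rxq_info *rxq_info =
 *		sfc_rxq_info_by_ethdev_qid(sas, ethdev_qid);
 */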
int
sfc_rx_start(struct sfc_adapter *sa)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
int rc;
- sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);
+ sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
+ sas->rxq_count);
rc = efx_rx_init(sa->nic);
if (rc != 0)
sfc_rx_stop(struct sfc_adapter *sa)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
- sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);
+ sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
+ sas->rxq_count);
sw_index = sas->rxq_count;
while (sw_index-- > 0) {
efx_rx_fini(sa->nic);
}
-static int
-sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+int
+sfc_rx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
+ unsigned int extra_efx_type_flags)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
SFC_ASSERT(rte_is_power_of_2(max_entries));
rxq_info->max_entries = max_entries;
+ rxq_info->type_flags = extra_efx_type_flags;
return 0;
}
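/*
 * The extra_efx_type_flags argument seeds rxq_info->type_flags so that
 * internal callers can request queue flags up front; ethdev queues pass
 * 0 (see sfc_rx_configure() below), while a reserved queue such as the
 * MAE counter RxQ would presumably pass the flags it needs.
 */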
int rc = 0;
switch (rxmode->mq_mode) {
- case ETH_MQ_RX_NONE:
+ case RTE_ETH_MQ_RX_NONE:
/* No special checks are required */
break;
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
sfc_err(sa, "RSS is not available");
rc = EINVAL;
* so unsupported offloads cannot be added as the result of
* below check.
*/
- if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
- (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+ if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) !=
+ (offloads_supported & RTE_ETH_RX_OFFLOAD_CHECKSUM)) {
sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
- rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
}
- if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
- (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+ if ((offloads_supported & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+ (~rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
- rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ rxmode->offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
}
return rc;
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- int sw_index;
+ sfc_sw_index_t sw_index;
+ sfc_ethdev_qid_t ethdev_qid;
- SFC_ASSERT(nb_rx_queues <= sas->rxq_count);
+ SFC_ASSERT(nb_rx_queues <= sas->ethdev_rxq_count);
- sw_index = sas->rxq_count;
- while (--sw_index >= (int)nb_rx_queues) {
- if (sas->rxq_info[sw_index].state & SFC_RXQ_INITIALIZED)
+	/*
+	 * Finalize only ethdev queues here; reserved queues are finalized
+	 * only on device close since they may require additional
+	 * deinitialization.
+	 */
+ ethdev_qid = sas->ethdev_rxq_count;
+ while (--ethdev_qid >= (int)nb_rx_queues) {
+ struct sfc_rxq_info *rxq_info;
+
+ rxq_info = sfc_rxq_info_by_ethdev_qid(sas, ethdev_qid);
+ if (rxq_info->state & SFC_RXQ_INITIALIZED) {
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
+ ethdev_qid);
sfc_rx_qfini(sa, sw_index);
+ }
}
- sas->rxq_count = nb_rx_queues;
+ sas->ethdev_rxq_count = nb_rx_queues;
}
/**
struct sfc_rss *rss = &sas->rss;
struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
+ const unsigned int nb_rsrv_rx_queues = sfc_nb_reserved_rxq(sas);
+ const unsigned int nb_rxq_total = nb_rx_queues + nb_rsrv_rx_queues;
+ bool reconfigure;
int rc;
sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
- nb_rx_queues, sas->rxq_count);
+ nb_rx_queues, sas->ethdev_rxq_count);
rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
if (rc != 0)
goto fail_check_mode;
- if (nb_rx_queues == sas->rxq_count)
+ if (nb_rxq_total == sas->rxq_count) {
+ reconfigure = true;
goto configure_rss;
+ }
if (sas->rxq_info == NULL) {
+ reconfigure = false;
rc = ENOMEM;
- sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
+ sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rxq_total,
sizeof(sas->rxq_info[0]), 0,
sa->socket_id);
if (sas->rxq_info == NULL)
* since it should not be shared.
*/
rc = ENOMEM;
- sa->rxq_ctrl = calloc(nb_rx_queues, sizeof(sa->rxq_ctrl[0]));
+ sa->rxq_ctrl = calloc(nb_rxq_total, sizeof(sa->rxq_ctrl[0]));
if (sa->rxq_ctrl == NULL)
goto fail_rxqs_ctrl_alloc;
} else {
struct sfc_rxq_info *new_rxq_info;
struct sfc_rxq *new_rxq_ctrl;
- if (nb_rx_queues < sas->rxq_count)
+ reconfigure = true;
+
+ /* Do not uninitialize reserved queues */
+ if (nb_rx_queues < sas->ethdev_rxq_count)
sfc_rx_fini_queues(sa, nb_rx_queues);
rc = ENOMEM;
new_rxq_info =
rte_realloc(sas->rxq_info,
- nb_rx_queues * sizeof(sas->rxq_info[0]), 0);
- if (new_rxq_info == NULL && nb_rx_queues > 0)
+ nb_rxq_total * sizeof(sas->rxq_info[0]), 0);
+ if (new_rxq_info == NULL && nb_rxq_total > 0)
goto fail_rxqs_realloc;
rc = ENOMEM;
new_rxq_ctrl = realloc(sa->rxq_ctrl,
- nb_rx_queues * sizeof(sa->rxq_ctrl[0]));
- if (new_rxq_ctrl == NULL && nb_rx_queues > 0)
+ nb_rxq_total * sizeof(sa->rxq_ctrl[0]));
+ if (new_rxq_ctrl == NULL && nb_rxq_total > 0)
goto fail_rxqs_ctrl_realloc;
sas->rxq_info = new_rxq_info;
sa->rxq_ctrl = new_rxq_ctrl;
- if (nb_rx_queues > sas->rxq_count) {
- memset(&sas->rxq_info[sas->rxq_count], 0,
- (nb_rx_queues - sas->rxq_count) *
+ if (nb_rxq_total > sas->rxq_count) {
+ unsigned int rxq_count = sas->rxq_count;
+
+ memset(&sas->rxq_info[rxq_count], 0,
+ (nb_rxq_total - rxq_count) *
sizeof(sas->rxq_info[0]));
- memset(&sa->rxq_ctrl[sas->rxq_count], 0,
- (nb_rx_queues - sas->rxq_count) *
+ memset(&sa->rxq_ctrl[rxq_count], 0,
+ (nb_rxq_total - rxq_count) *
sizeof(sa->rxq_ctrl[0]));
}
}
- while (sas->rxq_count < nb_rx_queues) {
- rc = sfc_rx_qinit_info(sa, sas->rxq_count);
+ while (sas->ethdev_rxq_count < nb_rx_queues) {
+ sfc_sw_index_t sw_index;
+
+ sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
+ sas->ethdev_rxq_count);
+ rc = sfc_rx_qinit_info(sa, sw_index, 0);
if (rc != 0)
goto fail_rx_qinit_info;
- sas->rxq_count++;
+ sas->ethdev_rxq_count++;
+ }
+
+ sas->rxq_count = sas->ethdev_rxq_count + nb_rsrv_rx_queues;
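+	/*
+	 * Resulting sw_index layout implied by the line above:
+	 *   [0 .. ethdev_rxq_count - 1]          ethdev Rx queues
+	 *   [ethdev_rxq_count .. rxq_count - 1]  reserved Rx queues
+	 */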
+
+ if (!reconfigure) {
+ rc = sfc_mae_counter_rxq_init(sa);
+ if (rc != 0)
+ goto fail_count_rxq_init;
}
configure_rss:
- rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
- MIN(sas->rxq_count, EFX_MAXRSS) : 0;
+ rss->channels = (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) ?
+ MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
if (rss->channels > 0) {
struct rte_eth_rss_conf *adv_conf_rss;
- unsigned int sw_index;
+ sfc_sw_index_t sw_index;
for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
rss->tbl[sw_index] = sw_index % rss->channels;
return 0;
fail_rx_process_adv_conf_rss:
+ if (!reconfigure)
+ sfc_mae_counter_rxq_fini(sa);
+
+fail_count_rxq_init:
fail_rx_qinit_info:
fail_rxqs_ctrl_realloc:
fail_rxqs_realloc:
struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
sfc_rx_fini_queues(sa, 0);
+ sfc_mae_counter_rxq_fini(sa);
rss->channels = 0;