rxq_info->state &= ~SFC_RXQ_FLUSHING;
}
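+/*
+ * Prime the EVQ so that the next event raises an interrupt. Priming
+ * is skipped when the EVQ read pointer has not advanced since the
+ * last prime, since rearming at the same position would be redundant.
+ */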
+static int
+sfc_efx_rx_qprime(struct sfc_efx_rxq *rxq)
+{
+ int rc = 0;
+
+ if (rxq->evq->read_ptr_primed != rxq->evq->read_ptr) {
+ rc = efx_ev_qprime(rxq->evq->common, rxq->evq->read_ptr);
+ if (rc == 0)
+ rxq->evq->read_ptr_primed = rxq->evq->read_ptr;
+ }
+ return rc;
+}
+
static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
sfc_efx_rx_qrefill(rxq);
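+ /*
+ * If Rx interrupts are enabled, rearm the EVQ after the burst so
+ * that the next event raises an interrupt again.
+ */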
+ if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN)
+ sfc_efx_rx_qprime(rxq);
+
return done_pkts;
}
return RTE_ETH_RX_DESC_UNAVAIL;
}
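+/*
+ * Check that a single Rx buffer can hold the maximum PDU plus the Rx
+ * prefix; if it cannot and Rx scatter is disabled, fail with an error
+ * message returned via the error argument.
+ */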
+boolean_t
+sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size, uint32_t rx_prefix_size,
+ boolean_t rx_scatter_enabled, const char **error)
+{
+ if ((rx_buf_size < pdu + rx_prefix_size) && !rx_scatter_enabled) {
+ *error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
+ return B_FALSE;
+ }
+
+ return B_TRUE;
+}
+
/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *
sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
rte_free(rxq);
}
+
+/* qstart uses the qstop and qpurge functions to roll back on failure */
+static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
+static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
+
static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
/* libefx-based datapath is specific to libefx-based PMD */
struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ int rc;
rxq->common = crxq->common;
rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);
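+ /* Prime the EVQ now if Rx interrupts were enabled before start */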
+ if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN) {
+ rc = sfc_efx_rx_qprime(rxq);
+ if (rc != 0)
+ goto fail_rx_qprime;
+ }
+
return 0;
+
+fail_rx_qprime:
+ sfc_efx_rx_qstop(dp_rxq, NULL);
+ sfc_efx_rx_qpurge(dp_rxq);
+ return rc;
}
-static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
__rte_unused unsigned int *evq_read_ptr)
*/
}
-static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}
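+/*
+ * Rx interrupt enable callback: set the flag checked on the poll and
+ * start paths and, if the queue is already started, prime the EVQ
+ * immediately so that the next event raises an interrupt.
+ */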
+static sfc_dp_rx_intr_enable_t sfc_efx_rx_intr_enable;
+static int
+sfc_efx_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ int rc = 0;
+
+ rxq->flags |= SFC_EFX_RXQ_FLAG_INTR_EN;
+ if (rxq->flags & SFC_EFX_RXQ_FLAG_STARTED) {
+ rc = sfc_efx_rx_qprime(rxq);
+ if (rc != 0)
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
+ }
+ return rc;
+}
+
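+/*
+ * Rx interrupt disable callback: an armed interrupt itself cannot be
+ * cancelled, so only future rearming is suppressed.
+ */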
+static sfc_dp_rx_intr_disable_t sfc_efx_rx_intr_disable;
+static int
+sfc_efx_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ /* The interrupt cannot be disarmed once primed; just stop rearming */
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
+ return 0;
+}
+
struct sfc_dp_rx sfc_efx_rx = {
.dp = {
.name = SFC_KVARG_DATAPATH_EFX,
.type = SFC_DP_RX,
.hw_fw_caps = 0,
},
- .features = SFC_DP_RX_FEAT_SCATTER |
- SFC_DP_RX_FEAT_CHECKSUM,
+ .features = SFC_DP_RX_FEAT_INTR,
+ .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
+ DEV_RX_OFFLOAD_RSS_HASH,
+ .queue_offload_capa = DEV_RX_OFFLOAD_SCATTER,
.qsize_up_rings = sfc_efx_rx_qsize_up_rings,
.qcreate = sfc_efx_rx_qcreate,
.qdestroy = sfc_efx_rx_qdestroy,
.supported_ptypes_get = sfc_efx_supported_ptypes_get,
.qdesc_npending = sfc_efx_rx_qdesc_npending,
.qdesc_status = sfc_efx_rx_qdesc_status,
+ .intr_enable = sfc_efx_rx_intr_enable,
+ .intr_disable = sfc_efx_rx_intr_disable,
.pkt_burst = sfc_efx_recv_pkts,
};
sfc_ev_qstop(rxq->evq);
}
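+/*
+ * Mask of Rx offloads the NIC can actually support: offloads the
+ * hardware cannot provide (e.g. outer IPv4 checksum without tunnel
+ * encapsulation support) are filtered out of the datapath
+ * capabilities.
+ */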
-uint64_t
-sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
+static uint64_t
+sfc_rx_get_offload_mask(struct sfc_adapter *sa)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
- uint64_t caps = 0;
+ uint64_t no_caps = 0;
- caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (encp->enc_tunnel_encapsulations_supported == 0)
+ no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
- if (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) {
- caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
- caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
- caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
- }
+ return ~no_caps;
+}
- if (encp->enc_tunnel_encapsulations_supported &&
- (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
- caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+uint64_t
+sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
+{
+ uint64_t caps = sa->priv.dp_rx->dev_offload_capa;
- return caps;
+ caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ return caps & sfc_rx_get_offload_mask(sa);
}
uint64_t
sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
{
- uint64_t caps = 0;
-
- if (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
- caps |= DEV_RX_OFFLOAD_SCATTER;
-
- return caps;
+ return sa->priv.dp_rx->queue_offload_capa & sfc_rx_get_offload_mask(sa);
}
static int
* Start is aligned the same or better than end,
* just align length.
*/
- buf_size = P2ALIGN(buf_size, nic_align_end);
+ buf_size = EFX_P2ALIGN(uint32_t, buf_size, nic_align_end);
}
return buf_size;
struct sfc_rxq *rxq;
struct sfc_dp_rx_qcreate_info info;
struct sfc_dp_rx_hw_limits hw_limits;
+ uint16_t rx_free_thresh;
+ const char *error;
memset(&hw_limits, 0, sizeof(hw_limits));
hw_limits.rxq_max_entries = sa->rxq_max_entries;
goto fail_bad_conf;
}
- if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
- (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
- sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
- "object size is too small", sw_index);
+ if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
+ encp->enc_rx_prefix_size,
+ (offloads & DEV_RX_OFFLOAD_SCATTER),
+ &error)) {
+ sfc_err(sa, "RxQ %u MTU check failed: %s", sw_index, error);
sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
"PDU size %u plus Rx prefix %u bytes",
sw_index, buf_size, (unsigned int)sa->port.pdu,
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
if ((encp->enc_tunnel_encapsulations_supported != 0) &&
- (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
+ (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
rxq = &sa->rxq_ctrl[sw_index];
rxq->evq = evq;
rxq->hw_index = sw_index;
+ /*
+ * If the Rx refill threshold is specified (non-zero) in the Rx
+ * configuration, use it. Otherwise use 1/8 of the number of Rx
+ * descriptors as the default. This keeps the Rx ring sufficiently
+ * full without refilling too aggressively when the packet rate is
+ * high.
+ *
+ * Since the PMD refills in bulks only, descriptors waiting for a
+ * full bulk are not refilled (the threshold is effectively rounded
+ * down), so it is rounded up here to mitigate that.
+ */
+ rx_free_thresh = (rx_conf->rx_free_thresh != 0) ?
+ rx_conf->rx_free_thresh : EFX_DIV_ROUND_UP(nb_rx_desc, 8);
+ /* Rx refill threshold cannot be smaller than refill bulk */
rxq_info->refill_threshold =
- RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
+ RTE_MAX(rx_free_thresh, SFC_RX_REFILL_BULK);
rxq_info->refill_mb_pool = mb_pool;
rxq->buf_size = buf_size;
info.rxq_entries = rxq_info->entries;
info.rxq_hw_ring = rxq->mem.esm_base;
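+ /* The EVQ index lets the datapath prime the correct EVQ for interrupts */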
+ info.evq_hw_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);
info.evq_entries = evq_entries;
info.evq_hw_ring = evq->mem.esm_base;
info.hw_index = rxq->hw_index;
rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
}
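+ /*
+ * Fixup: enforce the RSS hash offload whenever it is supported, as
+ * the hardware computes the hash regardless of the request.
+ */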
+ if ((offloads_supported & DEV_RX_OFFLOAD_RSS_HASH) &&
+ (~rxmode->offloads & DEV_RX_OFFLOAD_RSS_HASH))
+ rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
return rc;
}