Speed capabilities = Y
Link status = Y
Link status event = Y
+Rx interrupt = Y
Fast mbuf free = Y
Queue start/stop = Y
Runtime Rx queue setup = Y
- Scattered Rx DMA for packets that are larger than a single Rx descriptor
+- Receive queue interrupts
+
- Deferred receive and transmit queue start
- Transmit VLAN insertion (if running firmware variant supports it)
The features not yet supported include:
-- Receive queue interrupts
-
- Priority-based flow control
- Configurable RX CRC stripping (always stripped)
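Receive queue interrupts, now listed among the supported features above, are
consumed through the generic ethdev interrupt-mode API rather than a
driver-specific interface. A minimal sketch of the usual enable/wait/poll
pattern follows; the port and queue numbers are hypothetical, error handling
is elided, and the port is assumed to have been configured with
``intr_conf.rxq`` set::

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>
    #include <rte_mbuf.h>

    #define PORT_ID   0   /* hypothetical port */
    #define QUEUE_ID  0   /* hypothetical Rx queue */
    #define RX_BURST  32

    /* Attach the Rx queue interrupt to this lcore's epoll instance (once). */
    static int
    rxq_intr_setup(void)
    {
        return rte_eth_dev_rx_intr_ctl_q(PORT_ID, QUEUE_ID,
                                         RTE_EPOLL_PER_THREAD,
                                         RTE_INTR_EVENT_ADD, NULL);
    }

    /* Poll; when the queue runs dry, arm the interrupt and sleep on it. */
    static void
    rx_loop(void)
    {
        struct rte_mbuf *pkts[RX_BURST];
        struct rte_epoll_event ev;
        uint16_t i, nb;

        for (;;) {
            nb = rte_eth_rx_burst(PORT_ID, QUEUE_ID, pkts, RX_BURST);
            if (nb != 0) {
                for (i = 0; i < nb; i++)
                    rte_pktmbuf_free(pkts[i]);  /* real work goes here */
                continue;
            }

            /* Nothing received: re-arm the interrupt and block on epoll. */
            rte_eth_dev_rx_intr_enable(PORT_ID, QUEUE_ID);
            rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
            rte_eth_dev_rx_intr_disable(PORT_ID, QUEUE_ID);
        }
    }

Production loops (the ``l3fwd-power`` example, for instance) typically poll the
queue once more after enabling the interrupt and before sleeping, to avoid
missing packets that arrived in between.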
of Ethernet controllers. These controllers support link speeds up to
200Gbps, 50G PAM-4, and PCIe 4.0.
+* **Updated Solarflare network PMD.**
+
+ Updated the Solarflare ``sfc_efx`` driver with changes including:
+
+ * Added support for Rx interrupts.
+
* **Added memif PMD.**
Added the new Shared Memory Packet Interface (``memif``) PMD.
rc = EINVAL;
}
- if (conf->intr_conf.rxq != 0) {
+ if (conf->intr_conf.rxq != 0 &&
+ (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_INTR) == 0) {
sfc_err(sa, "Receive queue interrupt not supported");
rc = EINVAL;
}
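The relaxed check above accepts ``intr_conf.rxq`` only when the active Rx
datapath advertises ``SFC_DP_RX_FEAT_INTR``. For reference, a minimal sketch
of the application-side request that this check validates (hypothetical port
id and queue counts, all other settings left at defaults)::

    #include <string.h>
    #include <rte_ethdev.h>

    static int
    configure_port_with_rx_intr(uint16_t port_id)
    {
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.intr_conf.rxq = 1;  /* request Rx queue interrupts */

        /* One Rx and one Tx queue; defaults everywhere else. */
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }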
efx_intr_type_t type;
rte_intr_callback_fn handler;
boolean_t lsc_intr;
+ boolean_t rxq_intr;
};
struct sfc_rxq;
/** DMA-mapped Rx descriptors ring */
void *rxq_hw_ring;
+ /** Event queue index in hardware */
+ unsigned int evq_hw_index;
/** Associated event queue size */
unsigned int evq_entries;
/** Hardware event ring */
/** Check Rx descriptor status */
typedef int (sfc_dp_rx_qdesc_status_t)(struct sfc_dp_rxq *dp_rxq,
uint16_t offset);
+/** Enable Rx interrupts */
+typedef int (sfc_dp_rx_intr_enable_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Disable Rx interrupts */
+typedef int (sfc_dp_rx_intr_disable_t)(struct sfc_dp_rxq *dp_rxq);
/** Receive datapath definition */
struct sfc_dp_rx {
#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x1
#define SFC_DP_RX_FEAT_FLOW_FLAG 0x2
#define SFC_DP_RX_FEAT_FLOW_MARK 0x4
+#define SFC_DP_RX_FEAT_INTR 0x8
/**
* Rx offload capabilities supported by the datapath on device
* level only if HW/FW supports it.
sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
sfc_dp_rx_qdesc_npending_t *qdesc_npending;
sfc_dp_rx_qdesc_status_t *qdesc_status;
+ sfc_dp_rx_intr_enable_t *intr_enable;
+ sfc_dp_rx_intr_disable_t *intr_disable;
eth_rx_burst_t pkt_burst;
};
return sap->dp_rx->pool_ops_supported(pool);
}
+static int
+sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ struct sfc_rxq_info *rxq_info;
+
+ SFC_ASSERT(queue_id < sas->rxq_count);
+ rxq_info = &sas->rxq_info[queue_id];
+
+ return sap->dp_rx->intr_enable(rxq_info->dp);
+}
+
+static int
+sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ struct sfc_rxq_info *rxq_info;
+
+ SFC_ASSERT(queue_id < sas->rxq_count);
+ rxq_info = &sas->rxq_info[queue_id];
+
+ return sap->dp_rx->intr_disable(rxq_info->dp);
+}
+
static const struct eth_dev_ops sfc_eth_dev_ops = {
.dev_configure = sfc_dev_configure,
.dev_start = sfc_dev_start,
.rx_descriptor_done = sfc_rx_descriptor_done,
.rx_descriptor_status = sfc_rx_descriptor_status,
.tx_descriptor_status = sfc_tx_descriptor_status,
+ .rx_queue_intr_enable = sfc_rx_queue_intr_enable,
+ .rx_queue_intr_disable = sfc_rx_queue_intr_disable,
.tx_queue_setup = sfc_tx_queue_setup,
.tx_queue_release = sfc_tx_queue_release,
.flow_ctrl_get = sfc_flow_ctrl_get,
(void)memset((void *)esmp->esm_base, 0xff,
efx_evq_size(sa->nic, evq->entries));
- if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
+ if ((sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index) ||
+ (sa->intr.rxq_intr && evq->dp_rxq != NULL))
evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
else
evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
efx_evq_t *common;
const efx_ev_callbacks_t *callbacks;
unsigned int read_ptr;
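+ /* Read pointer value at which the event queue was last primed */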
+ unsigned int read_ptr_primed;
boolean_t exception;
efsys_mem_t mem;
struct sfc_dp_rxq *dp_rxq;
intr_handle = &pci_dev->intr_handle;
if (intr->handler != NULL) {
+ if (intr->rxq_intr && rte_intr_cap_multiple(intr_handle)) {
+ uint32_t intr_vector;
+
+ intr_vector = sa->eth_dev->data->nb_rx_queues;
+ rc = rte_intr_efd_enable(intr_handle, intr_vector);
+ if (rc != 0)
+ goto fail_rte_intr_efd_enable;
+ }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ intr_handle->intr_vec =
+ rte_calloc("intr_vec",
+ sa->eth_dev->data->nb_rx_queues, sizeof(int),
+ 0);
+ if (intr_handle->intr_vec == NULL) {
+ sfc_err(sa,
+ "Failed to allocate intr_vec for %d Rx queues",
+ sa->eth_dev->data->nb_rx_queues);
+ rc = ENOMEM;
+ goto fail_intr_vector_alloc;
+ }
+ }
+
sfc_log_init(sa, "rte_intr_callback_register");
rc = rte_intr_callback_register(intr_handle, intr->handler,
(void *)sa);
rte_intr_callback_unregister(intr_handle, intr->handler, (void *)sa);
fail_rte_intr_cb_reg:
+ rte_free(intr_handle->intr_vec);
+
+fail_intr_vector_alloc:
+ rte_intr_efd_disable(intr_handle);
+
+fail_rte_intr_efd_enable:
efx_intr_fini(sa->nic);
fail_intr_init:
efx_intr_disable(sa->nic);
intr_handle = &pci_dev->intr_handle;
+
+ rte_free(intr_handle->intr_vec);
+ rte_intr_efd_disable(intr_handle);
+
if (rte_intr_disable(intr_handle) != 0)
sfc_err(sa, "cannot disable interrupts");
intr->handler = NULL;
intr->lsc_intr = (sa->eth_dev->data->dev_conf.intr_conf.lsc != 0);
- if (!intr->lsc_intr) {
- sfc_notice(sa, "LSC tracking using interrupts is disabled");
+ intr->rxq_intr = (sa->eth_dev->data->dev_conf.intr_conf.rxq != 0);
+
+ if (!intr->lsc_intr && !intr->rxq_intr)
goto done;
- }
switch (intr->type) {
case EFX_INTR_MESSAGE:
rxq_info->state &= ~SFC_RXQ_FLUSHING;
}
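+/*
+ * Prime (re-arm) the event queue interrupt at the current read pointer;
+ * skip if it is already primed at this position.
+ */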
+static int
+sfc_efx_rx_qprime(struct sfc_efx_rxq *rxq)
+{
+ int rc = 0;
+
+ if (rxq->evq->read_ptr_primed != rxq->evq->read_ptr) {
+ rc = efx_ev_qprime(rxq->evq->common, rxq->evq->read_ptr);
+ if (rc == 0)
+ rxq->evq->read_ptr_primed = rxq->evq->read_ptr;
+ }
+ return rc;
+}
+
static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
sfc_efx_rx_qrefill(rxq);
+ if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN)
+ sfc_efx_rx_qprime(rxq);
+
return done_pkts;
}
rte_free(rxq);
}
+
+/* Use qstop and qpurge functions in the case of qstart failure */
+static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
+static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
+
static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
/* libefx-based datapath is specific to libefx-based PMD */
struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ int rc;
rxq->common = crxq->common;
rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);
+ if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN) {
+ rc = sfc_efx_rx_qprime(rxq);
+ if (rc != 0)
+ goto fail_rx_qprime;
+ }
+
return 0;
+
+fail_rx_qprime:
+ sfc_efx_rx_qstop(dp_rxq, NULL);
+ sfc_efx_rx_qpurge(dp_rxq);
+ return rc;
}
-static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
__rte_unused unsigned int *evq_read_ptr)
*/
}
-static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}
+static sfc_dp_rx_intr_enable_t sfc_efx_rx_intr_enable;
+static int
+sfc_efx_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ int rc = 0;
+
+ rxq->flags |= SFC_EFX_RXQ_FLAG_INTR_EN;
+ if (rxq->flags & SFC_EFX_RXQ_FLAG_STARTED) {
+ rc = sfc_efx_rx_qprime(rxq);
+ if (rc != 0)
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
+ }
+ return rc;
+}
+
+static sfc_dp_rx_intr_disable_t sfc_efx_rx_intr_disable;
+static int
+sfc_efx_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ /* Cannot disarm, just disable rearm */
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
+ return 0;
+}
+
struct sfc_dp_rx sfc_efx_rx = {
.dp = {
.name = SFC_KVARG_DATAPATH_EFX,
.type = SFC_DP_RX,
.hw_fw_caps = 0,
},
- .features = 0,
+ .features = SFC_DP_RX_FEAT_INTR,
.dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM,
.queue_offload_capa = DEV_RX_OFFLOAD_SCATTER,
.qsize_up_rings = sfc_efx_rx_qsize_up_rings,
.supported_ptypes_get = sfc_efx_supported_ptypes_get,
.qdesc_npending = sfc_efx_rx_qdesc_npending,
.qdesc_status = sfc_efx_rx_qdesc_status,
+ .intr_enable = sfc_efx_rx_intr_enable,
+ .intr_disable = sfc_efx_rx_intr_disable,
.pkt_burst = sfc_efx_recv_pkts,
};
info.rxq_entries = rxq_info->entries;
info.rxq_hw_ring = rxq->mem.esm_base;
+ info.evq_hw_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);
info.evq_entries = evq_entries;
info.evq_hw_ring = evq->mem.esm_base;
info.hw_index = rxq->hw_index;
#define SFC_EFX_RXQ_FLAG_STARTED 0x1
#define SFC_EFX_RXQ_FLAG_RUNNING 0x2
#define SFC_EFX_RXQ_FLAG_RSS_HASH 0x4
+#define SFC_EFX_RXQ_FLAG_INTR_EN 0x8
unsigned int ptr_mask;
unsigned int pending;
unsigned int completed;