static struct sfc_dp_list sfc_dp_head =
TAILQ_HEAD_INITIALIZER(sfc_dp_head);
+
+static void sfc_eth_dev_clear_ops(struct rte_eth_dev *dev);
+
+
static int
sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
sfc_err(sa, "unexpected adapter state %u on close", sa->state);
break;
}
+
+ /*
+ * Cleanup all resources in accordance with RTE_ETH_DEV_CLOSE_REMOVE.
+ * Rollback primary process sfc_eth_dev_init() below.
+ */
+
+ sfc_eth_dev_clear_ops(dev);
+
+ sfc_detach(sa);
+ sfc_unprobe(sa);
+
+ sfc_kvargs_cleanup(sa);
+
sfc_adapter_unlock(sa);
+ sfc_adapter_lock_fini(sa);
sfc_log_init(sa, "done");
+
+ /* Required for logging, so cleanup last */
+ sa->eth_dev = NULL;
+
+ dev->process_private = NULL;
+ free(sa);
}
static void
if (pdu > EFX_MAC_PDU_MAX) {
sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
(unsigned int)mtu, (unsigned int)pdu,
- EFX_MAC_PDU_MAX);
+ (unsigned int)EFX_MAC_PDU_MAX);
goto fail_inval;
}
return sap->dp_rx->pool_ops_supported(pool);
}
+/*
+ * rx_queue_intr_enable ethdev callback: enable interrupt notification
+ * for the given Rx queue.  Delegates to the datapath-specific
+ * intr_enable() handler for the queue's datapath Rx queue.
+ */
+static int
+sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_rxq_info *rxq_info;
+
+	SFC_ASSERT(queue_id < sas->rxq_count);
+	rxq_info = &sas->rxq_info[queue_id];
+
+	return sap->dp_rx->intr_enable(rxq_info->dp);
+}
+
+/*
+ * rx_queue_intr_disable ethdev callback: disable interrupt notification
+ * for the given Rx queue.  Mirrors sfc_rx_queue_intr_enable() and
+ * delegates to the datapath-specific intr_disable() handler.
+ */
+static int
+sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	struct sfc_rxq_info *rxq_info;
+
+	SFC_ASSERT(queue_id < sas->rxq_count);
+	rxq_info = &sas->rxq_info[queue_id];
+
+	return sap->dp_rx->intr_disable(rxq_info->dp);
+}
+
static const struct eth_dev_ops sfc_eth_dev_ops = {
.dev_configure = sfc_dev_configure,
.dev_start = sfc_dev_start,
.rx_descriptor_done = sfc_rx_descriptor_done,
.rx_descriptor_status = sfc_rx_descriptor_status,
.tx_descriptor_status = sfc_tx_descriptor_status,
+ .rx_queue_intr_enable = sfc_rx_queue_intr_enable,
+ .rx_queue_intr_disable = sfc_rx_queue_intr_disable,
.tx_queue_setup = sfc_tx_queue_setup,
.tx_queue_release = sfc_tx_queue_release,
.flow_ctrl_get = sfc_flow_ctrl_get,
sfc_log_init(sa, "entry");
+ dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0);
if (dev->data->mac_addrs == NULL) {
rc = ENOMEM;
+/*
+ * rte_eth_dev uninit entry point.
+ *
+ * Secondary processes only clear their per-process datapath ops.
+ * For the primary process, the full teardown sequence previously
+ * duplicated here now lives in sfc_dev_close() (RTE_ETH_DEV_CLOSE_REMOVE
+ * behaviour), so uninit becomes a thin wrapper around it.
+ */
static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa;
-
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
sfc_eth_dev_secondary_clear_ops(dev);
return 0;
}
- sa = sfc_adapter_by_eth_dev(dev);
- sfc_log_init(sa, "entry");
-
- sfc_adapter_lock(sa);
-
- sfc_eth_dev_clear_ops(dev);
-
- sfc_detach(sa);
- sfc_unprobe(sa);
-
- sfc_kvargs_cleanup(sa);
-
- sfc_adapter_unlock(sa);
- sfc_adapter_lock_fini(sa);
-
- sfc_log_init(sa, "done");
-
- /* Required for logging, so cleanup last */
- sa->eth_dev = NULL;
-
- dev->process_private = NULL;
- free(sa);
+ /* Cleanup is shared with the device close path */
+ sfc_dev_close(dev);
return 0;
}