eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
eth_dev->intr_handle = &dev->intr_handle;
+	/* allow ethdev to be removed on close */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
return eth_dev;
}
}
static void
-hn_dev_close(struct rte_eth_dev *dev __rte_unused)
+hn_dev_close(struct rte_eth_dev *dev)
{
- PMD_INIT_LOG(DEBUG, "close");
+ PMD_INIT_FUNC_TRACE();
hn_vf_close(dev);
+ hn_dev_free_queues(dev);
}
static const struct eth_dev_ops hn_eth_dev_ops = {
return error;
}
-void
-hn_dev_rx_queue_release(void *arg)
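+/* Free an Rx queue; optionally keep the primary queue used for control operations */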
+static void
+hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)
{
- struct hn_rx_queue *rxq = arg;
-
- PMD_INIT_FUNC_TRACE();
if (!rxq)
return;
hn_vf_rx_queue_release(rxq->hv, rxq->queue_id);
/* Keep primary queue to allow for control operations */
- if (rxq != rxq->hv->primary) {
- rte_free(rxq->event_buf);
- rte_free(rxq);
- }
+ if (keep_primary && rxq == rxq->hv->primary)
+ return;
+
+ rte_free(rxq->event_buf);
+ rte_free(rxq);
+}
+
+void
+hn_dev_rx_queue_release(void *arg)
+{
+ struct hn_rx_queue *rxq = arg;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hn_rx_queue_free(rxq, true);
}
int
return nb_rcv;
}
+
+void
+hn_dev_free_queues(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+
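+	/* Release all Rx queues, including the primary queue */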
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ hn_rx_queue_free(rxq, false);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
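+	/* Release all Tx queues */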
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ hn_dev_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
void hn_dev_rx_queue_release(void *arg);
+void hn_dev_free_queues(struct rte_eth_dev *dev);
/* Check if VF is attached */
static inline bool
void hn_vf_close(struct rte_eth_dev *dev)
{
- VF_ETHDEV_FUNC(dev, rte_eth_dev_close);
+ struct hn_data *hv = dev->data->dev_private;
+ uint16_t vf_port;
+
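+	/* Hold vf_lock while the VF port is closed and detached */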
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_port = hv->vf_port;
+ if (vf_port != HN_INVALID_PORT)
+ rte_eth_dev_close(vf_port);
+
+ hv->vf_port = HN_INVALID_PORT;
+ rte_spinlock_unlock(&hv->vf_lock);
}
void hn_vf_stats_reset(struct rte_eth_dev *dev)