unsigned int i;
int ret;
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ /* Check if process_private was already released. */
+ if (!dev->process_private)
+ return;
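+ /* Unmap Tx UARs, free process private data and release the port. */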
+ mlx5_tx_uar_uninit_secondary(dev);
+ mlx5_proc_priv_uninit(dev);
+ rte_eth_dev_release_port(dev);
+ return;
+ }
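+ /* Nothing to close if the shared IB context was never created. */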
+ if (!priv->sh)
+ return;
DRV_LOG(DEBUG, "port %u closing device \"%s\"",
dev->data->port_id,
((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
if (ret)
DRV_LOG(WARNING, "port %u some flows still remain",
dev->data->port_id);
- if (priv->sh) {
- /*
- * Free the shared context in last turn, because the cleanup
- * routines above may use some shared fields, like
- * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing
- * ifindex if Netlink fails.
- */
- mlx5_free_shared_ibctx(priv->sh);
- priv->sh = NULL;
- }
+ /*
+ * Free the shared context last, because the cleanup
+ * routines above may use some shared fields, like
+ * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieving the
+ * ifindex if Netlink fails.
+ */
+ mlx5_free_shared_ibctx(priv->sh);
if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
unsigned int c = 0;
uint16_t port_id;
/* Receive command fd from primary process */
err = mlx5_mp_req_verbs_cmd_fd(&mp_id);
if (err < 0)
- return NULL;
+ goto err_secondary;
/* Remap UAR for Tx queues. */
err = mlx5_tx_uar_init_secondary(eth_dev, err);
if (err)
- return NULL;
+ goto err_secondary;
/*
* Ethdev pointer is still required as input since
* the primary device is not accessible from the
eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
return eth_dev;
+err_secondary:
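+ /* Release the resources allocated so far for this port. */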
+ mlx5_dev_close(eth_dev);
+ return NULL;
}
/*
* Some parameters ("tx_db_nc" in particularly) are needed in
{
uint16_t port_id;
- RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
- rte_eth_dev_close(port_id);
+ RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+ /*
+ * mlx5_dev_close() is not registered for the secondary
+ * process, so call the close function explicitly here.
+ */
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ mlx5_dev_close(&rte_eth_devices[port_id]);
+ else
+ rte_eth_dev_close(port_id);
+ }
return 0;
}
const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_tx_queue_release(void *dpdk_txq);
int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
+void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
struct mlx5_txq_obj *mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
enum mlx5_txq_obj_type type);
struct mlx5_txq_obj *mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx);
munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
}
+/**
+ * Deinitialize Tx UAR registers for secondary process.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq;
+ struct mlx5_txq_ctrl *txq_ctrl;
+ unsigned int i;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
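+ /* Unmap the UAR pages of every configured Tx queue. */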
+ for (i = 0; i != priv->txqs_n; ++i) {
+ if (!(*priv->txqs)[i])
+ continue;
+ txq = (*priv->txqs)[i];
+ txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+ txq_uar_uninit_secondary(txq_ctrl);
+ }
+}
+
/**
* Initialize Tx UAR registers for secondary process.
*