#include "mlx5.h"
#include "mlx5_mr.h"
-#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
+#include "mlx5_tx.h"
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"
return -rte_errno;
}
+/**
+ * Translate the chunk address to MR key in order to put it into the cache.
+ */
+static void
+mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned int idx)
+{
+ struct mlx5_rxq_data *rxq = opaque;
+
+ RTE_SET_USED(mp);
+ RTE_SET_USED(idx);
+ mlx5_rx_addr2mr(rxq, (uintptr_t)memhdr->addr);
+}
+
+/**
+ * Register Rx queue mempools and fill the Rx queue cache.
+ * This function tolerates repeated mempool registration.
+ *
+ * @param[in] rxq_ctrl
+ * Rx queue control data.
+ *
+ * @return
+ * 0 on success, (-1) on failure and rte_errno is set.
+ */
+static int
+mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ struct mlx5_priv *priv = rxq_ctrl->priv;
+ struct rte_mempool *mp;
+ uint32_t s;
+ int ret = 0;
+
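+ /* Flush the per-queue MR cache before refilling it below. */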
+ mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
+ /* MPRQ mempool is registered on creation; just fill the cache. */
+ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+ rte_mempool_mem_iter(rxq_ctrl->rxq.mprq_mp,
+ mlx5_rxq_mempool_register_cb,
+ &rxq_ctrl->rxq);
+ return 0;
+ }
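+ /* Each Rx segment may use a distinct mempool; register and cache them all. */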
+ for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
+ mp = rxq_ctrl->rxq.rxseg[s].mp;
+ ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
+ priv->sh->pd, mp, &priv->mp_id);
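+ /* The mempool may already be registered; EEXIST is tolerated. */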
+ if (ret < 0 && rte_errno != EEXIST)
+ return ret;
+ rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
+ &rxq_ctrl->rxq);
+ }
+ return 0;
+}
+
/**
* Stop traffic on Rx queues.
*
if (!rxq_ctrl)
continue;
if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
- /* Pre-register Rx mempools. */
- if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
- mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
- rxq_ctrl->rxq.mprq_mp);
- } else {
- uint32_t s;
-
- for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
- mlx5_mr_update_mp
- (dev, &rxq_ctrl->rxq.mr_ctrl,
- rxq_ctrl->rxq.rxseg[s].mp);
- }
+ /*
+ * Pre-register the mempools. Regardless of whether implicit
+ * registration is enabled, Rx mempool destruction is tracked
+ * to free MRs.
+ */
+ if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
+ goto error;
ret = rxq_alloc_elts(rxq_ctrl);
if (ret)
goto error;
txq_ctrl = mlx5_txq_get(dev, i);
if (!txq_ctrl)
continue;
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
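+ /* Skip non-hairpin queues and hairpin queues peered to another port. */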
+ if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+ txq_ctrl->hairpin_conf.peers[0].port != self_port) {
mlx5_txq_release(dev, i);
continue;
}
- if (txq_ctrl->hairpin_conf.peers[0].port != self_port)
- continue;
if (txq_ctrl->hairpin_conf.manual_bind) {
mlx5_txq_release(dev, i);
return 0;
txq_ctrl = mlx5_txq_get(dev, i);
if (!txq_ctrl)
continue;
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ /* Skip non-hairpin queues and hairpin queues with other peer ports. */
+ if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+ txq_ctrl->hairpin_conf.peers[0].port != self_port) {
mlx5_txq_release(dev, i);
continue;
}
- /* Skip hairpin queues with other peer ports. */
- if (txq_ctrl->hairpin_conf.peers[0].port != self_port)
- continue;
if (!txq_ctrl->obj) {
rte_errno = ENOMEM;
DRV_LOG(ERR, "port %u no txq object found: %d",
uint32_t explicit;
uint16_t rx_queue;
- if (mlx5_eth_find_next(rx_port, priv->pci_dev) != rx_port) {
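+ /* Use the generic device handle so the lookup is not limited to PCI. */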
+ if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
rte_errno = ENODEV;
DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
return -rte_errno;
int ret;
uint16_t cur_port = priv->dev_data->port_id;
- if (mlx5_eth_find_next(rx_port, priv->pci_dev) != rx_port) {
+ if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
rte_errno = ENODEV;
DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
return -rte_errno;
{
int ret = 0;
uint16_t p, pp;
- struct mlx5_priv *priv = dev->data->dev_private;
/*
* If the Rx port has no hairpin configuration with the current port,
* information updating.
*/
if (rx_port == RTE_MAX_ETHPORTS) {
- MLX5_ETH_FOREACH_DEV(p, priv->pci_dev) {
+ MLX5_ETH_FOREACH_DEV(p, dev->device) {
ret = mlx5_hairpin_bind_single_port(dev, p);
if (ret != 0)
goto unbind;
return mlx5_hairpin_bind_single_port(dev, rx_port);
}
unbind:
- MLX5_ETH_FOREACH_DEV(pp, priv->pci_dev)
+ MLX5_ETH_FOREACH_DEV(pp, dev->device)
if (pp < p)
mlx5_hairpin_unbind_single_port(dev, pp);
return ret;
{
int ret = 0;
uint16_t p;
- struct mlx5_priv *priv = dev->data->dev_private;
if (rx_port == RTE_MAX_ETHPORTS)
- MLX5_ETH_FOREACH_DEV(p, priv->pci_dev) {
+ MLX5_ETH_FOREACH_DEV(p, dev->device) {
ret = mlx5_hairpin_unbind_single_port(dev, p);
if (ret != 0)
return ret;
dev->data->port_id, strerror(rte_errno));
goto error;
}
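+ /* Create a dummy loopback queue when the DevX flow engine requires one. */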
+ if ((priv->config.devx && priv->config.dv_flow_en &&
+ priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
+ ret = priv->obj_ops.lb_dummy_queue_create(dev);
+ if (ret)
+ goto error;
+ }
ret = mlx5_txq_start(dev);
if (ret) {
DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
goto error;
}
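+ /* Subscribe to mempool events so that MRs of destroyed mempools are freed. */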
+ if (mlx5_dev_ctx_shared_mempool_subscribe(dev) != 0) {
+ DRV_LOG(ERR, "port %u failed to subscribe to mempool lifecycle: %s",
+ dev->data->port_id, rte_strerror(rte_errno));
+ goto error;
+ }
rte_wmb();
dev->tx_pkt_burst = mlx5_select_tx_function(dev);
dev->rx_pkt_burst = mlx5_select_rx_function(dev);
mlx5_traffic_disable(dev);
mlx5_txq_stop(dev);
mlx5_rxq_stop(dev);
+ if (priv->obj_ops.lb_dummy_queue_release)
+ priv->obj_ops.lb_dummy_queue_release(dev);
mlx5_txpp_stop(dev); /* Stop last. */
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
/* Control flows for default traffic can be removed firstly. */
mlx5_traffic_disable(dev);
/* All RX queue flags will be cleared in the flush interface. */
- mlx5_flow_list_flush(dev, &priv->flows, true);
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
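+ /* Meter policy flows may reference Rx queues; flush them as well. */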
+ mlx5_flow_meter_rxq_flush(dev);
mlx5_rx_intr_vec_disable(dev);
priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
mlx5_txq_stop(dev);
mlx5_rxq_stop(dev);
+ if (priv->obj_ops.lb_dummy_queue_release)
+ priv->obj_ops.lb_dummy_queue_release(dev);
mlx5_txpp_stop(dev);
return 0;
}
mlx5_txq_release(dev, i);
}
- if (priv->config.dv_esw_en && !priv->config.vf) {
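+ /* The default FDB jump rule is created only on PFs, not on VFs or SFs. */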
+ if (priv->config.dv_esw_en && !priv->config.vf && !priv->config.sf) {
if (mlx5_flow_create_esw_table_zero_flow(dev))
priv->fdb_def_rule = 1;
else
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
}
/**