return -rte_errno;
}
-/**
- * Translate the chunk address to MR key in order to put in into the cache.
- */
-static void
-mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
- struct rte_mempool_memhdr *memhdr,
- unsigned int idx)
-{
- struct mlx5_rxq_data *rxq = opaque;
-
- RTE_SET_USED(mp);
- RTE_SET_USED(idx);
- mlx5_rx_addr2mr(rxq, (uintptr_t)memhdr->addr);
-}
-
/**
* Register Rx queue mempools and fill the Rx queue cache.
* This function tolerates repeated mempool registration.
 * @return
 *   0 on success, (-1) on failure and rte_errno is set.
*/
static int
-mlx5_rxq_mempool_register(struct rte_eth_dev *dev,
- struct mlx5_rxq_ctrl *rxq_ctrl)
+mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = rxq_ctrl->sh;
struct rte_mempool *mp;
uint32_t s;
int ret = 0;
mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
/* MPRQ mempool is registered on creation, just fill the cache. */
- if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
- rte_mempool_mem_iter(rxq_ctrl->rxq.mprq_mp,
- mlx5_rxq_mempool_register_cb,
- &rxq_ctrl->rxq);
- return 0;
- }
+ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
+ return mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
+ rxq_ctrl->rxq.mprq_mp);
for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
+ bool is_extmem;
+
mp = rxq_ctrl->rxq.rxseg[s].mp;
- ret = mlx5_mr_mempool_register(&sh->cdev->mr_scache,
- sh->cdev->pd, mp, &priv->mp_id);
+ is_extmem = (rte_pktmbuf_priv_flags(mp) &
+ RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
+ ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp,
+ is_extmem);
if (ret < 0 && rte_errno != EEXIST)
return ret;
- rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
- &rxq_ctrl->rxq);
+ ret = mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
+ mp);
+ if (ret < 0)
+ return ret;
}
return 0;
}
* the implicit registration is enabled or not,
* Rx mempool destruction is tracked to free MRs.
*/
- if (mlx5_rxq_mempool_register(dev, rxq_ctrl) < 0)
+ if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
return -rte_errno;
ret = rxq_alloc_elts(rxq_ctrl);
if (ret)
/*
* Unbind the hairpin port pair, HW configuration of both devices will be clear
- * and status will be reset for all the queues used between the them.
+ * and status will be reset for all the queues used between them.
* This function only supports to unbind the Tx from one Rx.
*
* @param dev
goto error;
}
mlx5_os_stats_init(dev);
+ /*
+ * Attach indirection table objects detached on port stop.
+ * They may be needed to create RSS in non-isolated mode.
+ */
+ ret = mlx5_action_handle_attach(dev);
+ if (ret) {
+ DRV_LOG(ERR,
+ "port %u failed to attach indirect actions: %s",
+ dev->data->port_id, rte_strerror(rte_errno));
+ goto error;
+ }
ret = mlx5_traffic_enable(dev);
if (ret) {
DRV_LOG(ERR, "port %u failed to set defaults flows",
mlx5_rxq_timestamp_set(dev);
/* Set a mask and offset of scheduling on timestamp into Tx queues. */
mlx5_txq_dynf_timestamp_set(dev);
- /* Attach indirection table objects detached on port stop. */
- ret = mlx5_action_handle_attach(dev);
- if (ret) {
- DRV_LOG(ERR,
- "port %u failed to attach indirect actions: %s",
- dev->data->port_id, rte_strerror(rte_errno));
- goto error;
- }
/*
* In non-cached mode, it only needs to start the default mreg copy
* action and no flow created by application exists anymore.